var/home/core/zuul-output/logs/kubelet.log:
Nov 28 09:57:07 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 28 09:57:07 crc restorecon[4771]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 09:57:07 crc restorecon[4771]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 09:57:07 crc 
restorecon[4771]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 09:57:07 crc 
restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 
09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 09:57:07 crc 
restorecon[4771]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 
09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:07 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]:
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 09:57:08 crc restorecon[4771]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 09:57:08 crc restorecon[4771]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 09:57:08 crc restorecon[4771]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 28 09:57:08 crc kubenswrapper[4838]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 28 09:57:08 crc kubenswrapper[4838]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 28 09:57:08 crc kubenswrapper[4838]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 28 09:57:08 crc kubenswrapper[4838]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Nov 28 09:57:08 crc kubenswrapper[4838]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Nov 28 09:57:08 crc kubenswrapper[4838]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.396996 4838 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400102 4838 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400126 4838 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400132 4838 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400139 4838 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400144 4838 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400149 4838 feature_gate.go:330] unrecognized feature gate: Example Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400154 4838 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400158 4838 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400162 4838 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400167 4838 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400174 4838 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400181 4838 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400188 4838 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400194 4838 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400201 4838 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400207 4838 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400214 4838 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400220 4838 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400228 4838 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400234 4838 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400241 4838 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400247 4838 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400252 4838 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400258 4838 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400263 4838 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400268 4838 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400273 4838 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400279 4838 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400291 4838 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400296 4838 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400301 4838 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400305 4838 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400310 4838 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400316 4838 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400321 4838 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400326 4838 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400337 4838 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400343 4838 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400348 4838 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400352 4838 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400357 4838 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400361 4838 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400368 4838 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400375 4838 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400380 4838 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400385 4838 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400389 4838 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400393 4838 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400398 4838 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400402 4838 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400406 4838 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400411 4838 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400415 4838 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400420 4838 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400424 4838 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400428 4838 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400432 4838 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400437 4838 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400441 4838 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400448 4838 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400453 4838 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400457 4838 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400463 4838 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400467 4838 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400471 4838 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400475 4838 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400480 4838 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400484 4838 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400489 4838 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400495 4838 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.400500 4838 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400740 4838 flags.go:64] FLAG: --address="0.0.0.0"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400754 4838 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400763 4838 flags.go:64] FLAG: --anonymous-auth="true"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400770 4838 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400777 4838 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400782 4838 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400789 4838 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400796 4838 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400801 4838 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400806 4838 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400812 4838 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400817 4838 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400822 4838 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400827 4838 flags.go:64] FLAG: --cgroup-root=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400833 4838 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400838 4838 flags.go:64] FLAG: --client-ca-file=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400843 4838 flags.go:64] FLAG: --cloud-config=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400847 4838 flags.go:64] FLAG: --cloud-provider=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400852 4838 flags.go:64] FLAG: --cluster-dns="[]"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400860 4838 flags.go:64] FLAG: --cluster-domain=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400864 4838 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400870 4838 flags.go:64] FLAG: --config-dir=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400875 4838 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400882 4838 flags.go:64] FLAG: --container-log-max-files="5"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400889 4838 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400894 4838 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400899 4838 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400904 4838 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400910 4838 flags.go:64] FLAG: --contention-profiling="false"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400915 4838 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400920 4838 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400925 4838 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400930 4838 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400936 4838 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400941 4838 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400947 4838 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400951 4838 flags.go:64] FLAG: --enable-load-reader="false"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400956 4838 flags.go:64] FLAG: --enable-server="true"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400961 4838 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400968 4838 flags.go:64] FLAG: --event-burst="100"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400973 4838 flags.go:64] FLAG: --event-qps="50"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400978 4838 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400983 4838 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400987 4838 flags.go:64] FLAG: --eviction-hard=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400994 4838 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.400998 4838 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401003 4838 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401008 4838 flags.go:64] FLAG: --eviction-soft=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401013 4838 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401018 4838 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401023 4838 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401027 4838 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401032 4838 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401037 4838 flags.go:64] FLAG: --fail-swap-on="true"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401042 4838 flags.go:64] FLAG: --feature-gates=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401048 4838 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401053 4838 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401058 4838 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401062 4838 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401067 4838 flags.go:64] FLAG: --healthz-port="10248"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401073 4838 flags.go:64] FLAG: --help="false"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401077 4838 flags.go:64] FLAG: --hostname-override=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401084 4838 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401089 4838 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401094 4838 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401099 4838 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401104 4838 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401110 4838 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401115 4838 flags.go:64] FLAG: --image-service-endpoint=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401120 4838 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401125 4838 flags.go:64] FLAG: --kube-api-burst="100"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401130 4838 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401136 4838 flags.go:64] FLAG: --kube-api-qps="50"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401141 4838 flags.go:64] FLAG: --kube-reserved=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401147 4838 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401151 4838 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401156 4838 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401161 4838 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401166 4838 flags.go:64] FLAG: --lock-file=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401170 4838 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401176 4838 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401181 4838 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401189 4838 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401194 4838 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401198 4838 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401203 4838 flags.go:64] FLAG: --logging-format="text"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401208 4838 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401214 4838 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401218 4838 flags.go:64] FLAG: --manifest-url=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401223 4838 flags.go:64] FLAG: --manifest-url-header=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401230 4838 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401235 4838 flags.go:64] FLAG: --max-open-files="1000000"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401241 4838 flags.go:64] FLAG: --max-pods="110"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401246 4838 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401251 4838 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401257 4838 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401261 4838 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401267 4838 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401272 4838 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401279 4838 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401291 4838 flags.go:64] FLAG: --node-status-max-images="50"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401296 4838 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401302 4838 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401307 4838 flags.go:64] FLAG: --pod-cidr=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401312 4838 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401322 4838 flags.go:64] FLAG: --pod-manifest-path=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401327 4838 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401332 4838 flags.go:64] FLAG: --pods-per-core="0"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401336 4838 flags.go:64] FLAG: --port="10250"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401343 4838 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401348 4838 flags.go:64] FLAG: --provider-id=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401353 4838 flags.go:64] FLAG: --qos-reserved=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401358 4838 flags.go:64] FLAG: --read-only-port="10255"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401363 4838 flags.go:64] FLAG: --register-node="true"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401368 4838 flags.go:64] FLAG: --register-schedulable="true"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401373 4838 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401381 4838 flags.go:64] FLAG: --registry-burst="10"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401386 4838 flags.go:64] FLAG: --registry-qps="5"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401391 4838 flags.go:64] FLAG: --reserved-cpus=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401395 4838 flags.go:64] FLAG: --reserved-memory=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401401 4838 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401406 4838 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401411 4838 flags.go:64] FLAG: --rotate-certificates="false"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401416 4838 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401421 4838 flags.go:64] FLAG: --runonce="false"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401426 4838 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401431 4838 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401437 4838 flags.go:64] FLAG: --seccomp-default="false"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401442 4838 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401447 4838 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401452 4838 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401462 4838 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401467 4838 flags.go:64] FLAG: --storage-driver-password="root"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401472 4838 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401477 4838 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401482 4838 flags.go:64] FLAG: --storage-driver-user="root"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401487 4838 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401492 4838 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401498 4838 flags.go:64] FLAG: --system-cgroups=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401503 4838 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401512 4838 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401517 4838 flags.go:64] FLAG: --tls-cert-file=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401523 4838 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401531 4838 flags.go:64] FLAG: --tls-min-version=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401536 4838 flags.go:64] FLAG: --tls-private-key-file=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401543 4838 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401549 4838 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401555 4838 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401560 4838 flags.go:64] FLAG: --v="2"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401568 4838 flags.go:64] FLAG: --version="false"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401575 4838 flags.go:64] FLAG: --vmodule=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401581 4838 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.401587 4838 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401698 4838 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401706 4838 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401735 4838 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401742 4838 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401748 4838 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401753 4838 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401759 4838 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401764 4838 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401770 4838 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401775 4838 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401781 4838 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401786 4838 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401790 4838 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401794 4838 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401797 4838 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401801 4838 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401805 4838 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401808 4838 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401811 4838 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401815 4838 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401818 4838 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401822 4838 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401825 4838 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401829 4838 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401832 4838 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401836 4838 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401839 4838 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401843 4838 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401848 4838 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401852 4838 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401856 4838 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401861 4838 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401865 4838 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401868 4838 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401873 4838 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401877 4838 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401880 4838 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401885 4838 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401891 4838 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401895 4838 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401899 4838 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401903 4838 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401909 4838 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401913 4838 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401917 4838 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401921 4838 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401924 4838 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401928 4838 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401933 4838 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401937 4838 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401942 4838 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401947 4838 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401951 4838 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401955 4838 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401959 4838 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401962 4838 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401965 4838 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401969 4838 feature_gate.go:330] unrecognized feature gate: Example
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401973 4838 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401976 4838 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401980 4838 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401983 4838 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401986 4838 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401990 4838 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401994 4838 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.401997 4838 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.402001 4838 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.402005 4838 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.402008 4838 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.402011 4838 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.402015 4838 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.402029 4838 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.411929 4838 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.411966 4838 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412045 4838 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412055 4838 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412061 4838 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412067 4838 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412073 4838 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412081 4838 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412085 4838 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412090 4838 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412095 4838 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412100 4838 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412104 4838 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412109 4838 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412113 4838 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412117 4838 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412121 4838 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412125 4838 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412129 4838 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412135 4838 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412139 4838 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412145 4838 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412149 4838 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412154 4838 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412158 4838 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412163 4838 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412167 4838 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412172 4838 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412176 4838 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412180 4838 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412184 4838 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412188 4838 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412193 4838 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412199 4838 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412204 4838 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412209 4838 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412213 4838 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412217 4838 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412221 4838 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412225 4838 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412230 4838 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412234 4838 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412238 4838 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412243 4838 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412247 4838 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412252 4838 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412258 4838 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412266 4838 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412271 4838 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412276 4838 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412281 4838 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412286 4838 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412293 4838 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412299 4838 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412304 4838 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412310 4838 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412316 4838 feature_gate.go:330] unrecognized feature gate: Example
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412323 4838 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412328 4838 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412332 4838 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412337 4838 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412342 4838 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412346 4838 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412350 4838 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412355 4838 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412361 4838 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412366 4838 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412371 4838 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412375 4838 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412380 4838 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412384 4838 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412390 4838 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412396 4838 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.412405 4838 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412537 4838 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412546 4838 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412551 4838 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412556 4838 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412560 4838 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412565 4838 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412569 4838 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412576 4838 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412583 4838 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412588 4838 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412593 4838 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412598 4838 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412603 4838 feature_gate.go:330] unrecognized feature gate: Example
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412608 4838 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412613 4838 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412618 4838 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412622 4838 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412626 4838 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412631 4838 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412636 4838 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412641 4838 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412645 4838 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412649 4838 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412657 4838 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412661 4838 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412666 4838 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412670 4838 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412674 4838 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412678 4838 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412682 4838 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412686 4838 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412690 4838 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412694 4838 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412698 4838 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412702 4838 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412706 4838 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412728 4838 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412733 4838 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412737 4838 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412742 4838 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412746 4838 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412750 4838 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412755 4838 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412758 4838 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412762 4838 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412766 4838 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412771 4838 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412774 4838 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412778 4838 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412783 4838 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412789 4838 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412794 4838 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412799 4838 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412805 4838 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412811 4838 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412817 4838 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412822 4838 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412826 4838 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412831 4838 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412834 4838 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412838 4838 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412843 4838 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412846 4838 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412851 4838 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412854 4838 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412859 4838 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412863 4838 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412867 4838 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412871 4838 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412875 4838 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.412878 4838 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.412886 4838 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.413257 4838 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.416166 4838 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.416251 4838 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.416798 4838 server.go:997] "Starting client certificate rotation"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.416823 4838 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.417198 4838 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-21 04:03:17.424941262 +0000 UTC
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.417332 4838 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 546h6m9.007613555s for next certificate rotation
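[editor's note] The two certificate_manager.go lines above carry the whole rotation story: the client certificate expires 2026-02-24 05:52:08 UTC, the manager has picked 2025-12-21 04:03:17 UTC as its randomized rotation deadline, and it now simply sleeps 546h6m9s. A sketch of that arithmetic (the 70-90% jitter window and the 90-day lifetime are assumptions for illustration; certificate_manager.go has its own policy):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a randomized instant inside the certificate's
// lifetime at which to start rotating, so a fleet of kubelets doesn't
// hit the CA at the same moment. The 70-90% window is assumed here.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	lifetime := notAfter.Sub(notBefore)
	frac := 0.7 + 0.2*rand.Float64()
	return notBefore.Add(time.Duration(frac * float64(lifetime)))
}

func main() {
	notAfter, _ := time.Parse(time.RFC3339, "2026-02-24T05:52:08Z")
	notBefore := notAfter.Add(-90 * 24 * time.Hour)                // assumed lifetime; not in the log
	startup, _ := time.Parse(time.RFC3339, "2025-11-28T09:57:08Z") // kubelet start time from this log
	deadline := rotationDeadline(notBefore, notAfter)
	// Sleeping until the deadline is what produces the
	// "Waiting 546h6m9.007613555s" style message above.
	fmt.Printf("rotation deadline %s, waiting %s\n",
		deadline.Format(time.RFC3339), deadline.Sub(startup).Round(time.Second))
}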
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.422284 4838 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.423640 4838 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.436103 4838 log.go:25] "Validated CRI v1 runtime API"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.448420 4838 log.go:25] "Validated CRI v1 image API"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.450093 4838 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.453056 4838 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-28-09-52-25-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.453116 4838 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.478933 4838 manager.go:217] Machine: {Timestamp:2025-11-28 09:57:08.476751299 +0000 UTC m=+0.175725549 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654124544 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:e14391b4-beaf-4b9f-9de4-e3bbde3f3327 BootID:2d884793-8973-45d8-9335-b721f6accbac Filesystems:[{Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:b8:4a:fa Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:b8:4a:fa Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:57:9e:6c Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:65:c4:bf Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:24:e4:a3 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:4a:ce:be Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:95:57:2d Speed:-1 Mtu:1496} {Name:eth10 MacAddress:a2:87:b4:3e:89:b1 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:de:06:1b:a7:9b:ff Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654124544 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.479358 4838 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.479544 4838 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.480160 4838 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.480367 4838 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.480416 4838 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.480652 4838 topology_manager.go:138] "Creating topology manager with none policy"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.480667 4838 container_manager_linux.go:303] "Creating device plugin manager"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.480876 4838 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.480925 4838 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
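[editor's note] The nodeConfig blob above fixes the node's resource policy: 200m CPU / 350Mi memory held back as SystemReserved, and hard eviction thresholds of memory.available < 100Mi, nodefs.available < 10%, imagefs.available < 15%, plus 5% inode floors. A small Go sketch of how such signal/threshold pairs are evaluated, with invented observed values (the real eviction manager adds grace periods, ranking, and reclaim logic):

package main

import "fmt"

// threshold mirrors one HardEvictionThresholds entry above: a signal
// compared against an absolute quantity or a percentage of capacity.
type threshold struct {
	signal   string
	quantity int64   // bytes; 0 if percentage-based
	percent  float64 // fraction of capacity; 0 if quantity-based
}

// breached reports whether the observed available amount has fallen
// under the configured floor.
func (t threshold) breached(available, capacity int64) bool {
	limit := t.quantity
	if t.percent > 0 {
		limit = int64(t.percent * float64(capacity))
	}
	return available < limit
}

func main() {
	thresholds := []threshold{
		{signal: "memory.available", quantity: 100 << 20}, // 100Mi
		{signal: "nodefs.available", percent: 0.1},
		{signal: "imagefs.available", percent: 0.15},
	}
	// Hypothetical observations; capacities taken from the Machine
	// line above (33654124544 bytes RAM, 85292941312 bytes on /var).
	observed := map[string][2]int64{ // available, capacity
		"memory.available":  {512 << 20, 33654124544},
		"nodefs.available":  {6 << 30, 85292941312},
		"imagefs.available": {20 << 30, 85292941312},
	}
	for _, t := range thresholds {
		o := observed[t.signal]
		if t.breached(o[0], o[1]) {
			fmt.Printf("signal %s breached: eviction manager would start reclaiming\n", t.signal)
		}
	}
}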
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.481235 4838 state_mem.go:36] "Initialized new in-memory state store"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.481627 4838 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.482176 4838 kubelet.go:418] "Attempting to sync node with API server"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.482201 4838 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.482226 4838 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.482242 4838 kubelet.go:324] "Adding apiserver pod source"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.482253 4838 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.484153 4838 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.484537 4838 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.485281 4838 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.485730 4838 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.485730 4838 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused
Nov 28 09:57:08 crc kubenswrapper[4838]: E1128 09:57:08.485827 4838 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError"
Nov 28 09:57:08 crc kubenswrapper[4838]: E1128 09:57:08.485835 4838 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.485879 4838 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.485900 4838 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.485909 4838 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.485918 4838 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.485932 4838 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.485943 4838 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.485972 4838 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.485987 4838 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.485998 4838 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.486007 4838 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.486031 4838 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.486039 4838 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.486195 4838 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.486646 4838 server.go:1280] "Started kubelet"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.487780 4838 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.488188 4838 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.488552 4838 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 28 09:57:08 crc systemd[1]: Started Kubernetes Kubelet.
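[editor's note] Several clients above already fail with "connection refused" because api-int.crc.testing:6443 is not serving yet; the kubelet starts anyway and each component retries on its own cadence (the reflectors relist, and the node-lease controller a few lines below retries at interval="200ms"). A generic Go sketch of that retry-with-backoff pattern (endpoint, timeout, and limits here are illustrative, not client-go's actual tuning):

package main

import (
	"fmt"
	"net"
	"time"
)

// dialWithBackoff keeps retrying a TCP dial with capped exponential
// backoff, the way the kubelet's API clients keep retrying while the
// apiserver is still coming up. All parameters are illustrative.
func dialWithBackoff(addr string, attempts int, initial, max time.Duration) (net.Conn, error) {
	delay := initial
	for i := 0; i < attempts; i++ {
		conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
		if err == nil {
			return conn, nil
		}
		fmt.Printf("dial %s failed (%v), retrying in %s\n", addr, err, delay)
		time.Sleep(delay)
		if delay *= 2; delay > max {
			delay = max
		}
	}
	return nil, fmt.Errorf("%s still unreachable after %d attempts", addr, attempts)
}

func main() {
	conn, err := dialWithBackoff("api-int.crc.testing:6443", 10, 200*time.Millisecond, 10*time.Second)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer conn.Close()
	fmt.Println("apiserver reachable:", conn.RemoteAddr())
}

The design point the log illustrates is that none of these failures are fatal: startup proceeds, and each subsystem converges independently once the endpoint answers.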
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.491848 4838 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 28 09:57:08 crc kubenswrapper[4838]: E1128 09:57:08.491957 4838 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.65:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187c232c99b36e15 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 09:57:08.486598165 +0000 UTC m=+0.185572335,LastTimestamp:2025-11-28 09:57:08.486598165 +0000 UTC m=+0.185572335,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.492540 4838 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.492678 4838 server.go:460] "Adding debug handlers to kubelet server"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.494095 4838 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 06:09:12.71027022 +0000 UTC
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.497213 4838 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 28 09:57:08 crc kubenswrapper[4838]: E1128 09:57:08.497897 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.498134 4838 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.498164 4838 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.498272 4838 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 28 09:57:08 crc kubenswrapper[4838]: E1128 09:57:08.498925 4838 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" interval="200ms"
Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.499247 4838 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused
Nov 28 09:57:08 crc kubenswrapper[4838]: E1128 09:57:08.499426 4838 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.499572 4838 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.499608 4838 factory.go:55] Registering systemd factory
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.499626 4838 factory.go:221] Registration of the systemd container factory successfully
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.500266 4838 factory.go:153] Registering CRI-O factory
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.500309 4838 factory.go:221] Registration of the crio container factory successfully
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.500352 4838 factory.go:103] Registering Raw factory
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.500379 4838 manager.go:1196] Started watching for new ooms in manager
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.502325 4838 manager.go:319] Starting recovery of all containers
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505204 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505254 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505271 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505285 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505299 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505315 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505329 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505344 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505359 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505430 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505443 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505456 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505474 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505493 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505506 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505519 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505535 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505547 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505559 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505571 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505582 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505596 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505608 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505621 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505640 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505653 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505669 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505684 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505697 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505708 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505739 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505752 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505767 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505779 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505833 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505847 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505859 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505872 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505887 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505900 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505912 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505923 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505935 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505947 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505960 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505972 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505984 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.505995 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506006 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506017 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506029 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506042 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506060 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
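[editor's note] The long run of reconstruct.go:130 records above and below is the volume manager rebuilding its actual state of the world after a restart: with the API server still unreachable, it walks the pod volume directories left on disk and records every mount it finds as "uncertain" until it can be verified against the desired state and, if needed, cleaned up. A conceptual Go sketch of that directory scan (the path layout follows /var/lib/kubelet as seen in this log; the real reconstruction also inspects mount points, devices, and SELinux contexts):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// reconstructed mirrors the fields the log prints per uncertain volume.
type reconstructed struct {
	podUID, plugin, volume string
}

// scanPodVolumes walks <root>/pods/<uid>/volumes/<plugin>/<volume> and
// returns one record per directory found, each to be marked uncertain.
// This is a conceptual sketch, not the kubelet's reconstruct.go.
func scanPodVolumes(root string) ([]reconstructed, error) {
	matches, err := filepath.Glob(filepath.Join(root, "pods", "*", "volumes", "*", "*"))
	if err != nil {
		return nil, err
	}
	var out []reconstructed
	for _, dir := range matches {
		out = append(out, reconstructed{
			podUID: filepath.Base(filepath.Dir(filepath.Dir(filepath.Dir(dir)))), // pod UID
			plugin: filepath.Base(filepath.Dir(dir)),                             // e.g. kubernetes.io~secret
			volume: filepath.Base(dir),                                           // volume name
		})
	}
	return out, nil
}

func main() {
	vols, err := scanPodVolumes("/var/lib/kubelet")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	for _, v := range vols {
		fmt.Printf("volume marked uncertain: pod=%s plugin=%s volume=%s\n",
			v.podUID, v.plugin, v.volume)
	}
}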
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506075 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506089 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506101 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506113 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506125 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506135 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506148 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506161 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506173 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506184 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506197 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506208 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506220 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506231 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506248 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506260 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506271 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506284 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506297 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506309 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506320 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506333 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506346 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506365 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506378 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506391 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506403 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506415 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506429 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506440 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506451 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506463 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506473 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506485 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506496 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506507 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506519 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506533 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506544 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506557 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506567 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506578 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506589 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506601 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506611 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506668 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506683 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506694 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.506705 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.507936 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.507962 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508082 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508109 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508129 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508142 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508162 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508174 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508192 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508211 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508223 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508242 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508254 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508270 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508283 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508300 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508313 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508327 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508345 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508358 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508371 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508389 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508403 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508422 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508436 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508450 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.508469 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.510765 4838 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.510861 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.510901 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.510927 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.510967 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.510991 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.511114 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.511157 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.511219 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.511260 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.511287 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.511326 4838
reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.511353 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.511381 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.511418 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.511598 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.511665 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.511705 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.511766 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.511795 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.511830 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.511858 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.511895 4838 reconstruct.go:130] 
"Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.511923 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.511949 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.511981 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512008 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512044 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512099 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512128 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512163 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512190 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512235 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512260 4838 reconstruct.go:130] "Volume is marked 
as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512283 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512315 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512339 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512371 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512397 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512423 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512454 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512477 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512500 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512532 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512557 4838 reconstruct.go:130] 
"Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512594 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512621 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512660 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512756 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512792 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512828 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512859 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512887 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512923 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512951 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.512993 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual 
state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513019 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513045 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513078 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513108 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513144 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513198 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513224 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513262 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513286 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513319 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513350 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513393 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513430 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513455 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513492 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513518 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513565 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513602 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513632 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513659 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513692 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513748 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual 
state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513785 4838 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513809 4838 reconstruct.go:97] "Volume reconstruction finished" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.513827 4838 reconciler.go:26] "Reconciler: start to sync state" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.527933 4838 manager.go:324] Recovery completed Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.537105 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.539018 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.539065 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.539076 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.540317 4838 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.540347 4838 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.540371 4838 state_mem.go:36] "Initialized new in-memory state store" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.558627 4838 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.560681 4838 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.560762 4838 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.560835 4838 kubelet.go:2335] "Starting kubelet main sync loop" Nov 28 09:57:08 crc kubenswrapper[4838]: E1128 09:57:08.560888 4838 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 28 09:57:08 crc kubenswrapper[4838]: W1128 09:57:08.561520 4838 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused Nov 28 09:57:08 crc kubenswrapper[4838]: E1128 09:57:08.561574 4838 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.563662 4838 policy_none.go:49] "None policy: Start" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.564411 4838 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.564443 4838 state_mem.go:35] "Initializing new in-memory state store" Nov 28 09:57:08 crc kubenswrapper[4838]: E1128 09:57:08.599082 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.625901 4838 manager.go:334] "Starting Device Plugin manager" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.625961 4838 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.625976 4838 server.go:79] "Starting device plugin registration server" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.626432 4838 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.626446 4838 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.626799 4838 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.626892 4838 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.626901 4838 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 28 09:57:08 crc kubenswrapper[4838]: E1128 09:57:08.636220 4838 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.661687 4838 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 09:57:08 crc kubenswrapper[4838]: 
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.661954 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.663673 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.663755 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.663767 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.663964 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.664546 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.664614 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.665080 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.665121 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.665132 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.665228 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.665353 4838 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.665379 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.665678 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.665747 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.665765 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.666521 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.666540 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.666549 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.666637 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.666693 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.666755 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.666769 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.666797 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.666836 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.668240 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.668267 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.668303 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.668629 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.669175 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.669226 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.669782 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.669815 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.669832 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.670179 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.670223 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.670233 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.670261 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.670279 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.670293 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.670650 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.670884 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.672898 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.672945 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.672961 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:08 crc kubenswrapper[4838]: E1128 09:57:08.699803 4838 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" interval="400ms" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.718113 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.718183 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.718217 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.718252 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.718383 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.718504 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.718553 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" 
(UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.718593 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.718921 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.719000 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.719035 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.719068 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.719115 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.719189 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.719244 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.727085 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.729611 4838 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.729656 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.729671 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.729707 4838 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 09:57:08 crc kubenswrapper[4838]: E1128 09:57:08.730196 4838 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.65:6443: connect: connection refused" node="crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820194 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820263 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820295 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820319 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820342 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820367 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820389 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820409 4838 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820434 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820458 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820446 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820507 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820541 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820470 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820480 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820594 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820596 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820613 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820673 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820783 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820725 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820739 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820774 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820769 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820872 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820789 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820766 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.820856 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.821019 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.821106 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.931296 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.932953 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.933007 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.933020 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.933050 4838 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: E1128 09:57:08.933627 4838 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.65:6443: connect: connection refused" node="crc"
Nov 28 09:57:08 crc kubenswrapper[4838]: I1128 09:57:08.997352 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 09:57:09 crc kubenswrapper[4838]: W1128 09:57:09.018454 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-d90c540b44008d10e7bd893bbe1fd8e8cb3558d318801ed8628a7cfe4a80ea62 WatchSource:0}: Error finding container d90c540b44008d10e7bd893bbe1fd8e8cb3558d318801ed8628a7cfe4a80ea62: Status 404 returned error can't find the container with id d90c540b44008d10e7bd893bbe1fd8e8cb3558d318801ed8628a7cfe4a80ea62
Nov 28 09:57:09 crc kubenswrapper[4838]: I1128 09:57:09.019794 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 09:57:09 crc kubenswrapper[4838]: W1128 09:57:09.035707 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-edd0b3738f68a926ba3937f30f56e7c57401e61c8da5f156e1c7ca53d5dda04c WatchSource:0}: Error finding container edd0b3738f68a926ba3937f30f56e7c57401e61c8da5f156e1c7ca53d5dda04c: Status 404 returned error can't find the container with id edd0b3738f68a926ba3937f30f56e7c57401e61c8da5f156e1c7ca53d5dda04c
Nov 28 09:57:09 crc kubenswrapper[4838]: I1128 09:57:09.036704 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 28 09:57:09 crc kubenswrapper[4838]: I1128 09:57:09.046441 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Nov 28 09:57:09 crc kubenswrapper[4838]: W1128 09:57:09.049987 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-a58ca0c60b0e6241eb34fb12ed1d8b106cfe62664b18c421b1e43e91585b33b0 WatchSource:0}: Error finding container a58ca0c60b0e6241eb34fb12ed1d8b106cfe62664b18c421b1e43e91585b33b0: Status 404 returned error can't find the container with id a58ca0c60b0e6241eb34fb12ed1d8b106cfe62664b18c421b1e43e91585b33b0
Nov 28 09:57:09 crc kubenswrapper[4838]: I1128 09:57:09.050835 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 09:57:09 crc kubenswrapper[4838]: W1128 09:57:09.061078 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-482314758a12c53602bf676d57b99198f69c86cc53b6bc453b8fe642e569cce0 WatchSource:0}: Error finding container 482314758a12c53602bf676d57b99198f69c86cc53b6bc453b8fe642e569cce0: Status 404 returned error can't find the container with id 482314758a12c53602bf676d57b99198f69c86cc53b6bc453b8fe642e569cce0
Nov 28 09:57:09 crc kubenswrapper[4838]: E1128 09:57:09.102242 4838 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" interval="800ms"
Nov 28 09:57:09 crc kubenswrapper[4838]: W1128 09:57:09.261112 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-b03a5b700aa9f013ca90b9241e03c90ff314d0d1cdd11722441e9bdda4f979c9 WatchSource:0}: Error finding container b03a5b700aa9f013ca90b9241e03c90ff314d0d1cdd11722441e9bdda4f979c9: Status 404 returned error can't find the container with id b03a5b700aa9f013ca90b9241e03c90ff314d0d1cdd11722441e9bdda4f979c9
Nov 28 09:57:09 crc kubenswrapper[4838]: E1128 09:57:09.305232 4838 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.65:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187c232c99b36e15 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 09:57:08.486598165 +0000 UTC m=+0.185572335,LastTimestamp:2025-11-28 09:57:08.486598165 +0000 UTC m=+0.185572335,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 28 09:57:09 crc kubenswrapper[4838]: I1128 09:57:09.334062 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:09 crc kubenswrapper[4838]: I1128 09:57:09.335854 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:09 crc kubenswrapper[4838]: I1128 09:57:09.335907 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:09 crc kubenswrapper[4838]: I1128 09:57:09.335925 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:09 crc kubenswrapper[4838]: I1128 09:57:09.335960 4838 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 28 09:57:09 crc kubenswrapper[4838]: E1128 09:57:09.336533 4838 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.65:6443: connect: connection refused" node="crc"
Nov 28 09:57:09 crc kubenswrapper[4838]: W1128 09:57:09.376223 4838 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused
Nov 28 09:57:09 crc kubenswrapper[4838]: E1128 09:57:09.376319 4838 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError"
Nov 28 09:57:09 crc kubenswrapper[4838]: W1128 09:57:09.394245 4838 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused
Nov 28 09:57:09 crc kubenswrapper[4838]: E1128 09:57:09.394323 4838 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError"
Nov 28 09:57:09 crc kubenswrapper[4838]: I1128 09:57:09.489404 4838 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused
Nov 28 09:57:09 crc kubenswrapper[4838]: I1128 09:57:09.494408 4838 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 20:48:20.51979763 +0000 UTC
Nov 28 09:57:09 crc kubenswrapper[4838]: I1128 09:57:09.494474 4838 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 994h51m11.02532568s for next certificate rotation
Nov 28 09:57:09 crc kubenswrapper[4838]: I1128 09:57:09.565580 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"a58ca0c60b0e6241eb34fb12ed1d8b106cfe62664b18c421b1e43e91585b33b0"}
Nov 28 09:57:09 crc kubenswrapper[4838]: I1128 09:57:09.567154 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"edd0b3738f68a926ba3937f30f56e7c57401e61c8da5f156e1c7ca53d5dda04c"}
Nov 28 09:57:09 crc kubenswrapper[4838]: I1128 09:57:09.568273 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"d90c540b44008d10e7bd893bbe1fd8e8cb3558d318801ed8628a7cfe4a80ea62"}
Nov 28 09:57:09 crc kubenswrapper[4838]: I1128 09:57:09.569616 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b03a5b700aa9f013ca90b9241e03c90ff314d0d1cdd11722441e9bdda4f979c9"}
Nov 28 09:57:09 crc kubenswrapper[4838]: I1128 09:57:09.571296 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"482314758a12c53602bf676d57b99198f69c86cc53b6bc453b8fe642e569cce0"}
Nov 28 09:57:09 crc kubenswrapper[4838]: E1128 09:57:09.903689 4838 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" interval="1.6s"
Nov 28 09:57:09 crc kubenswrapper[4838]: W1128 09:57:09.962687 4838 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused
Nov 28 09:57:09 crc kubenswrapper[4838]: E1128 09:57:09.962815 4838 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError"
Nov 28 09:57:10 crc kubenswrapper[4838]: W1128 09:57:10.079090 4838 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused
Nov 28 09:57:10 crc kubenswrapper[4838]: E1128 09:57:10.079209 4838 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.136784 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.138771 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.138838 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.138852 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.138919 4838 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 28 09:57:10 crc kubenswrapper[4838]: E1128 09:57:10.139558 4838 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.65:6443: connect: connection refused" node="crc"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.489598 4838 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.576703 4838 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5" exitCode=0
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.576797 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5"}
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.576904 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.577868 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.577910 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.577921 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.579012 4838 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="33c04d931c3928e04948b6939ccc80fc0c50e2a3af20ab6617bf8c994ad4cbd9" exitCode=0
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.579139 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.579162 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"33c04d931c3928e04948b6939ccc80fc0c50e2a3af20ab6617bf8c994ad4cbd9"}
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.579611 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.580199 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.580235 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.580249 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.580561 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.580621 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.580643 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.581280 4838 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533" exitCode=0
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.581349 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.581350 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533"}
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.582013 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.582047 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.582059 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.583633 4838 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89" exitCode=0
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.583662 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89"}
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.583846 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.585102 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.585163 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.585187 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:10 crc kubenswrapper[4838]: I1128 09:57:10.585866 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b"}
Nov 28 09:57:11 crc kubenswrapper[4838]: I1128 09:57:11.489422 4838 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused
Nov 28 09:57:11 crc kubenswrapper[4838]: E1128 09:57:11.505326 4838 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" interval="3.2s"
Nov 28 09:57:11 crc kubenswrapper[4838]: I1128 09:57:11.589826 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f"}
Nov 28 09:57:11 crc kubenswrapper[4838]: I1128 09:57:11.739889 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:11 crc kubenswrapper[4838]: I1128 09:57:11.741166 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:11 crc kubenswrapper[4838]: I1128 09:57:11.741208 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:11 crc kubenswrapper[4838]: I1128 09:57:11.741224 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:11 crc kubenswrapper[4838]: I1128 09:57:11.741252 4838 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 28 09:57:11 crc kubenswrapper[4838]: E1128 09:57:11.743554 4838 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.65:6443: connect: connection refused" node="crc"
Nov 28 09:57:11 crc kubenswrapper[4838]: W1128 09:57:11.937022 4838 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused
Nov 28 09:57:11 crc kubenswrapper[4838]: E1128 09:57:11.937169 4838 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError"
Nov 28 09:57:12 crc kubenswrapper[4838]: W1128 09:57:12.447978 4838 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused
Nov 28 09:57:12 crc kubenswrapper[4838]: E1128 09:57:12.448093 4838 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError"
Nov 28 09:57:12 crc kubenswrapper[4838]: I1128 09:57:12.488922 4838 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused
Nov 28 09:57:12 crc kubenswrapper[4838]: I1128 09:57:12.598246 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65"}
Nov 28 09:57:12 crc kubenswrapper[4838]: I1128 09:57:12.600612 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89"}
Nov 28 09:57:12 crc kubenswrapper[4838]: W1128 09:57:12.808427 4838 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused
Nov 28 09:57:12 crc kubenswrapper[4838]: E1128 09:57:12.808524 4838 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError"
Nov 28 09:57:12 crc kubenswrapper[4838]: W1128 09:57:12.958552 4838 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused
Nov 28 09:57:12 crc kubenswrapper[4838]: E1128 09:57:12.958688 4838 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError"
Nov 28 09:57:13 crc kubenswrapper[4838]: I1128 09:57:13.489770 4838 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused
Nov 28 09:57:13 crc kubenswrapper[4838]: I1128 09:57:13.609626 4838 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="70d608e317f201a7280cec671bea1a0496f7a68e7caa8925f17ff61f9addf9c7" exitCode=0
Nov 28 09:57:13 crc kubenswrapper[4838]: I1128 09:57:13.609708 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"70d608e317f201a7280cec671bea1a0496f7a68e7caa8925f17ff61f9addf9c7"}
Nov 28 09:57:13 crc kubenswrapper[4838]: I1128 09:57:13.611274 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"17e965aad7643d62c651c1e652be45bd914cfe3f14a0a6f43e4e4376b4cb7be0"}
Nov 28 09:57:13 crc kubenswrapper[4838]: I1128 09:57:13.613321 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d"}
Nov 28 09:57:14 crc kubenswrapper[4838]: I1128 09:57:14.621021 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07"}
Nov 28 09:57:14 crc kubenswrapper[4838]: I1128 09:57:14.624943 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:14 crc kubenswrapper[4838]: I1128 09:57:14.625340 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555"}
Nov 28 09:57:14 crc kubenswrapper[4838]: I1128 09:57:14.625519 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:14 crc kubenswrapper[4838]: I1128 09:57:14.626663 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:14 crc kubenswrapper[4838]: I1128 09:57:14.626754 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:14 crc kubenswrapper[4838]: I1128 09:57:14.626784 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:14 crc kubenswrapper[4838]: I1128 09:57:14.626803 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:14 crc kubenswrapper[4838]: I1128 09:57:14.626835 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:14 crc kubenswrapper[4838]: I1128 09:57:14.626847 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:14 crc kubenswrapper[4838]: I1128 09:57:14.943806 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:15 crc kubenswrapper[4838]: I1128 09:57:15.002563 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:15 crc kubenswrapper[4838]: I1128 09:57:15.002633 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:15 crc kubenswrapper[4838]: I1128 09:57:15.002646 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:15 crc kubenswrapper[4838]: I1128 09:57:15.002689 4838 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 28 09:57:15 crc kubenswrapper[4838]: I1128 09:57:15.631041 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04"}
Nov 28 09:57:16 crc kubenswrapper[4838]: I1128 09:57:16.637143 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b"}
Nov 28 09:57:16 crc kubenswrapper[4838]: I1128 09:57:16.639812 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"8ab9a9d79738e635e5aaffb8063f01f042d429c0323078dc8aab6bdb886927a3"}
Nov 28 09:57:16 crc kubenswrapper[4838]: I1128 09:57:16.643382 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005"}
Nov 28 09:57:17 crc kubenswrapper[4838]: I1128 09:57:17.649977 4838 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="8ab9a9d79738e635e5aaffb8063f01f042d429c0323078dc8aab6bdb886927a3" exitCode=0
Nov 28 09:57:17 crc kubenswrapper[4838]: I1128 09:57:17.650107 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"8ab9a9d79738e635e5aaffb8063f01f042d429c0323078dc8aab6bdb886927a3"}
Nov 28 09:57:17 crc kubenswrapper[4838]: I1128 09:57:17.650214 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:17 crc kubenswrapper[4838]: I1128 09:57:17.650265 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:17 crc kubenswrapper[4838]: I1128 09:57:17.651687 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:17 crc kubenswrapper[4838]: I1128 09:57:17.651774 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:17 crc kubenswrapper[4838]: I1128 09:57:17.651770 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:17 crc kubenswrapper[4838]: I1128 09:57:17.651848 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:17 crc kubenswrapper[4838]: I1128 09:57:17.651798 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:17 crc kubenswrapper[4838]: I1128 09:57:17.651885 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:17 crc kubenswrapper[4838]: I1128 09:57:17.701045 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 09:57:18 crc kubenswrapper[4838]: E1128 09:57:18.636413 4838 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 28 09:57:18 crc kubenswrapper[4838]: I1128 09:57:18.652474 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:18 crc kubenswrapper[4838]: I1128 09:57:18.652539 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:18 crc kubenswrapper[4838]: I1128 09:57:18.654270 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:18 crc kubenswrapper[4838]: I1128 09:57:18.654331 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:18 crc kubenswrapper[4838]: I1128 09:57:18.654352 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:18 crc kubenswrapper[4838]: I1128 09:57:18.654380 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:18 crc kubenswrapper[4838]: I1128 09:57:18.654475 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:18 crc kubenswrapper[4838]: I1128 09:57:18.654496 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:19 crc kubenswrapper[4838]: I1128 09:57:19.334036 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 09:57:19 crc kubenswrapper[4838]: I1128 09:57:19.334264 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:19 crc kubenswrapper[4838]: I1128 09:57:19.339566 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:19 crc kubenswrapper[4838]: I1128 09:57:19.339811 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:19 crc kubenswrapper[4838]: I1128 09:57:19.339892 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:19 crc kubenswrapper[4838]: I1128 09:57:19.659850 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b"}
Nov 28 09:57:19 crc kubenswrapper[4838]: I1128 09:57:19.663785 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"437bbddaf475f73991f0b8de425637a7b4baf931244bbb84f64e41aec2025064"}
Nov 28 09:57:20 crc kubenswrapper[4838]: I1128 09:57:20.503593 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 09:57:20 crc kubenswrapper[4838]: I1128 09:57:20.504275 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:20 crc kubenswrapper[4838]: I1128 09:57:20.505981 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:20 crc kubenswrapper[4838]: I1128 09:57:20.506037 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:20 crc kubenswrapper[4838]: I1128 09:57:20.506049 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:20 crc kubenswrapper[4838]: I1128 09:57:20.670793 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b70728b7510ae583d35dd0047e99f5221eb6225b5f935ba40e01f02658850900"}
Nov 28 09:57:20 crc kubenswrapper[4838]: I1128 09:57:20.670973 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:20 crc kubenswrapper[4838]: I1128 09:57:20.672363 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:20 crc kubenswrapper[4838]: I1128 09:57:20.672398 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:20 crc kubenswrapper[4838]: I1128 09:57:20.672409 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:20 crc kubenswrapper[4838]: I1128 09:57:20.673991 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"4546745e01db79b4e5c22e32cc6c0c0290159bc97182b87319e4311deb64c031"}
Nov 28 09:57:21 crc kubenswrapper[4838]: I1128 09:57:21.679847 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"610cdc4f3b3598ee593b3bbffdb4df63cc98c1f6a56602b357471acb90ad3add"}
Nov 28 09:57:21 crc kubenswrapper[4838]: I1128 09:57:21.679900 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"bf37cdbd396192c6d67f546d27be521dafa2258de2928d497c13c9fe4ee53d13"}
Nov 28 09:57:21 crc kubenswrapper[4838]: I1128 09:57:21.679940 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:21 crc kubenswrapper[4838]: I1128 09:57:21.680009 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 09:57:21 crc kubenswrapper[4838]: I1128 09:57:21.680657 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:21 crc kubenswrapper[4838]: I1128 09:57:21.680683 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:21 crc kubenswrapper[4838]: I1128 09:57:21.680692 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:21 crc kubenswrapper[4838]: I1128 09:57:21.989958 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 09:57:21 crc kubenswrapper[4838]: I1128 09:57:21.990197 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:21 crc kubenswrapper[4838]: I1128 09:57:21.991565 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:21 crc kubenswrapper[4838]: I1128 09:57:21.991590 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:21 crc kubenswrapper[4838]: I1128 09:57:21.991600 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:22 crc kubenswrapper[4838]: I1128 09:57:22.013818 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 09:57:22 crc kubenswrapper[4838]: I1128 09:57:22.112225 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 09:57:22 crc kubenswrapper[4838]: I1128 09:57:22.688498 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ac22cb890363e5cedeb87067d4a8b7ba53c3cc547f2c133472a373795ca79cda"}
Nov 28 09:57:22 crc kubenswrapper[4838]: I1128 09:57:22.688608 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:22 crc kubenswrapper[4838]: I1128 09:57:22.688671 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:22 crc kubenswrapper[4838]: I1128 09:57:22.688741 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:22 crc kubenswrapper[4838]: I1128 09:57:22.690174 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:22 crc kubenswrapper[4838]: I1128 09:57:22.690275 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:22 crc kubenswrapper[4838]: I1128 09:57:22.690296 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:22 crc kubenswrapper[4838]: I1128 09:57:22.690209 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:22 crc kubenswrapper[4838]: I1128 09:57:22.690349 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:22 crc kubenswrapper[4838]: I1128 09:57:22.690353 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:22 crc kubenswrapper[4838]: I1128 09:57:22.690486 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:22 crc kubenswrapper[4838]: I1128 09:57:22.690323 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:22 crc kubenswrapper[4838]: I1128 09:57:22.690534 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:22 crc kubenswrapper[4838]: I1128 09:57:22.695070 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 09:57:23 crc kubenswrapper[4838]: I1128 09:57:23.692510 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:23 crc kubenswrapper[4838]: I1128 09:57:23.692516 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:23 crc kubenswrapper[4838]: I1128 09:57:23.694751 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:23 crc kubenswrapper[4838]: I1128 09:57:23.694824 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:23 crc kubenswrapper[4838]: I1128 09:57:23.694842 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:23 crc kubenswrapper[4838]: I1128 09:57:23.694890 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:23 crc kubenswrapper[4838]: I1128 09:57:23.694935 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:23 crc kubenswrapper[4838]: I1128 09:57:23.694953 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:23 crc kubenswrapper[4838]: I1128 09:57:23.972566 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 09:57:23 crc kubenswrapper[4838]: I1128 09:57:23.973022 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:23 crc kubenswrapper[4838]: I1128 09:57:23.974924 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:23 crc kubenswrapper[4838]: I1128 09:57:23.974981 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:23 crc kubenswrapper[4838]: I1128 09:57:23.974993 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:24 crc kubenswrapper[4838]: I1128 09:57:24.263260 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 09:57:24 crc kubenswrapper[4838]: I1128 09:57:24.490572 4838 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout
Nov 28 09:57:24 crc kubenswrapper[4838]: I1128 09:57:24.694857 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:24 crc kubenswrapper[4838]: I1128 09:57:24.694862 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:24 crc kubenswrapper[4838]: I1128 09:57:24.695944 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:24 crc kubenswrapper[4838]: I1128 09:57:24.695970 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:24 crc kubenswrapper[4838]: I1128 09:57:24.695981 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:24 crc kubenswrapper[4838]: I1128 09:57:24.696045 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:24 crc kubenswrapper[4838]: I1128 09:57:24.696063 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:24 crc kubenswrapper[4838]: I1128 09:57:24.696073 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:24 crc kubenswrapper[4838]: E1128 09:57:24.706735 4838 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" interval="6.4s"
Nov 28 09:57:24 crc kubenswrapper[4838]: I1128 09:57:24.990954 4838 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 28 09:57:24 crc kubenswrapper[4838]: I1128 09:57:24.991114 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 28 09:57:25 crc kubenswrapper[4838]: E1128 09:57:25.003759 4838 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": net/http: TLS handshake timeout" node="crc"
Nov 28 09:57:25 crc kubenswrapper[4838]: W1128 09:57:25.740927 4838 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout
Nov 28 09:57:25 crc kubenswrapper[4838]: I1128 09:57:25.741071 4838 trace.go:236] Trace[478933854]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 09:57:15.739) (total time: 10001ms):
Nov 28 09:57:25 crc kubenswrapper[4838]: Trace[478933854]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (09:57:25.740)
Nov 28 09:57:25 crc kubenswrapper[4838]: Trace[478933854]: [10.001875652s] [10.001875652s] END
Nov 28 09:57:25 crc kubenswrapper[4838]: E1128 09:57:25.741108 4838 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 28 09:57:26 crc kubenswrapper[4838]: I1128 09:57:26.859763 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc"
Nov 28 09:57:26 crc kubenswrapper[4838]: I1128 09:57:26.859933 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:26 crc kubenswrapper[4838]: I1128 09:57:26.861405 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:26 crc kubenswrapper[4838]: I1128 09:57:26.861462 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:26 crc kubenswrapper[4838]: I1128 09:57:26.861483 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:27 crc kubenswrapper[4838]: W1128 09:57:27.097640 4838 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout
Nov 28 09:57:27 crc kubenswrapper[4838]: I1128 09:57:27.097827 4838 trace.go:236] Trace[1815807116]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 09:57:17.095) (total time: 10001ms):
Nov 28 09:57:27 crc kubenswrapper[4838]: Trace[1815807116]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (09:57:27.097)
Nov 28 09:57:27 crc kubenswrapper[4838]: Trace[1815807116]: [10.00195633s] [10.00195633s] END
Nov 28 09:57:27 crc kubenswrapper[4838]: E1128 09:57:27.097862 4838 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 28 09:57:27 crc kubenswrapper[4838]: I1128 09:57:27.710532 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Nov 28 09:57:27 crc kubenswrapper[4838]: I1128 09:57:27.710798 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:27 crc kubenswrapper[4838]: I1128 09:57:27.712248 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:27 crc kubenswrapper[4838]: I1128 09:57:27.712314 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:27 crc kubenswrapper[4838]: I1128 09:57:27.712338 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:28 crc kubenswrapper[4838]: W1128 09:57:28.203661 4838 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout
Nov 28 09:57:28 crc kubenswrapper[4838]: I1128 09:57:28.203861 4838 trace.go:236] Trace[479774412]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 09:57:18.200) (total time: 10003ms):
Nov 28 09:57:28 crc kubenswrapper[4838]: Trace[479774412]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10003ms (09:57:28.203)
Nov 28 09:57:28 crc kubenswrapper[4838]: Trace[479774412]: [10.00328486s] [10.00328486s] END
Nov 28 09:57:28 crc kubenswrapper[4838]: E1128 09:57:28.203909 4838 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 28 09:57:28 crc kubenswrapper[4838]: W1128 09:57:28.554776 4838 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout
Nov 28 09:57:28 crc kubenswrapper[4838]: I1128 09:57:28.554906 4838 trace.go:236] Trace[1997600846]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 09:57:18.553) (total time: 10001ms):
Nov 28 09:57:28 crc kubenswrapper[4838]: Trace[1997600846]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (09:57:28.554)
Nov 28 09:57:28 crc kubenswrapper[4838]: Trace[1997600846]: [10.001665479s] [10.001665479s] END
Nov 28 09:57:28 crc kubenswrapper[4838]: E1128 09:57:28.554942 4838 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 28 09:57:28 crc kubenswrapper[4838]: E1128 09:57:28.636586 4838 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 28 09:57:29 crc kubenswrapper[4838]: E1128 09:57:29.307348 4838 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": net/http: TLS handshake timeout" event="&Event{ObjectMeta:{crc.187c232c99b36e15 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 09:57:08.486598165 +0000 UTC m=+0.185572335,LastTimestamp:2025-11-28 09:57:08.486598165 +0000 UTC m=+0.185572335,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 28 09:57:31 crc kubenswrapper[4838]: I1128 09:57:31.404599 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:31 crc kubenswrapper[4838]: I1128 09:57:31.406075 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:31 crc kubenswrapper[4838]: I1128 09:57:31.406136 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:31 crc kubenswrapper[4838]: I1128 09:57:31.406156 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:31 crc kubenswrapper[4838]: I1128 09:57:31.406189 4838 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 28 09:57:33 crc kubenswrapper[4838]: I1128 09:57:33.972873 4838 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="Get \"https://192.168.126.11:6443/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 28 09:57:33 crc kubenswrapper[4838]: I1128 09:57:33.972976 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 28 09:57:34 crc kubenswrapper[4838]: E1128 09:57:34.571797 4838 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": read tcp 38.102.83.65:50678->38.102.83.65:6443: read: connection reset by peer" node="crc"
Nov 28 09:57:34 crc kubenswrapper[4838]: I1128 09:57:34.571934 4838 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="Get \"https://192.168.126.11:6443/livez\": read tcp 192.168.126.11:36484->192.168.126.11:6443: read: connection reset by peer" start-of-body=
Nov 28 09:57:34 crc kubenswrapper[4838]: I1128 09:57:34.572021 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/livez\": read tcp 192.168.126.11:36484->192.168.126.11:6443: read: connection reset by peer"
Nov 28 09:57:34 crc kubenswrapper[4838]: I1128 09:57:34.990604 4838 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 28 09:57:34 crc kubenswrapper[4838]: I1128 09:57:34.990790 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 28 09:57:35 crc kubenswrapper[4838]: W1128 09:57:35.222933 4838 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused
Nov 28 09:57:35 crc kubenswrapper[4838]: E1128 09:57:35.223051 4838 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError"
Nov 28 09:57:35 crc kubenswrapper[4838]: I1128 09:57:35.572279 4838 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused - error from a previous attempt: read tcp 38.102.83.65:50518->38.102.83.65:6443: read: connection reset by peer
Nov 28 09:57:35 crc kubenswrapper[4838]: E1128 09:57:35.572786 4838 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused - error from a previous attempt: read tcp 38.102.83.65:50668->38.102.83.65:6443: read: connection reset by peer" interval="7s"
Nov 28 09:57:35 crc kubenswrapper[4838]: I1128 09:57:35.731659 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver/0.log"
Nov 28 09:57:35 crc kubenswrapper[4838]: I1128 09:57:35.732182 4838 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89" exitCode=1
Nov 28 09:57:35 crc kubenswrapper[4838]: I1128 09:57:35.732231 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89"}
Nov 28 09:57:35 crc kubenswrapper[4838]: I1128 09:57:35.732390 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:35 crc kubenswrapper[4838]: I1128 09:57:35.732908 4838 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Nov 28 09:57:35 crc kubenswrapper[4838]: I1128 09:57:35.733097 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Nov 28 09:57:35 crc kubenswrapper[4838]: I1128 09:57:35.733213 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:35 crc kubenswrapper[4838]: I1128 09:57:35.733240 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:35 crc kubenswrapper[4838]: I1128 09:57:35.733249 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:35 crc kubenswrapper[4838]: I1128 09:57:35.733661 4838 scope.go:117] "RemoveContainer" containerID="065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89"
Nov 28 09:57:35 crc kubenswrapper[4838]: I1128 09:57:35.735205 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 09:57:35 crc kubenswrapper[4838]: I1128 09:57:35.736313 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:35 crc kubenswrapper[4838]: I1128 09:57:35.736345 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:35 crc kubenswrapper[4838]: I1128 09:57:35.736359 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:36 crc
kubenswrapper[4838]: I1128 09:57:36.489537 4838 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused Nov 28 09:57:36 crc kubenswrapper[4838]: W1128 09:57:36.612188 4838 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused Nov 28 09:57:36 crc kubenswrapper[4838]: E1128 09:57:36.612299 4838 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError" Nov 28 09:57:37 crc kubenswrapper[4838]: I1128 09:57:37.489508 4838 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused Nov 28 09:57:38 crc kubenswrapper[4838]: I1128 09:57:38.489650 4838 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused Nov 28 09:57:38 crc kubenswrapper[4838]: W1128 09:57:38.521466 4838 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused Nov 28 09:57:38 crc kubenswrapper[4838]: E1128 09:57:38.521570 4838 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError" Nov 28 09:57:38 crc kubenswrapper[4838]: E1128 09:57:38.636786 4838 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 28 09:57:38 crc kubenswrapper[4838]: I1128 09:57:38.694948 4838 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 28 09:57:38 crc kubenswrapper[4838]: I1128 09:57:38.695014 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 28 09:57:38 crc kubenswrapper[4838]: I1128 09:57:38.710993 4838 patch_prober.go:28] interesting pod/etcd-crc container/etcd namespace/openshift-etcd: Startup probe status=failure output="Get \"https://192.168.126.11:9980/readyz\": 
net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 09:57:38 crc kubenswrapper[4838]: I1128 09:57:38.711041 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-etcd/etcd-crc" podUID="2139d3e2895fc6797b9c76a1b4c9886d" containerName="etcd" probeResult="failure" output="Get \"https://192.168.126.11:9980/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 09:57:38 crc kubenswrapper[4838]: I1128 09:57:38.882192 4838 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 28 09:57:38 crc kubenswrapper[4838]: I1128 09:57:38.882300 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 28 09:57:38 crc kubenswrapper[4838]: I1128 09:57:38.972115 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 09:57:39 crc kubenswrapper[4838]: W1128 09:57:39.025129 4838 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.65:6443: connect: connection refused Nov 28 09:57:39 crc kubenswrapper[4838]: E1128 09:57:39.025251 4838 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.65:6443: connect: connection refused" logger="UnhandledError" Nov 28 09:57:39 crc kubenswrapper[4838]: E1128 09:57:39.309168 4838 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.65:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187c232c99b36e15 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 09:57:08.486598165 +0000 UTC m=+0.185572335,LastTimestamp:2025-11-28 09:57:08.486598165 +0000 UTC m=+0.185572335,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 28 09:57:39 crc kubenswrapper[4838]: I1128 09:57:39.745804 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 28 09:57:39 crc kubenswrapper[4838]: I1128 09:57:39.748055 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver/0.log" Nov 28 09:57:39 crc kubenswrapper[4838]: I1128 
09:57:39.748430 4838 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b70728b7510ae583d35dd0047e99f5221eb6225b5f935ba40e01f02658850900" exitCode=255 Nov 28 09:57:39 crc kubenswrapper[4838]: I1128 09:57:39.748497 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"b70728b7510ae583d35dd0047e99f5221eb6225b5f935ba40e01f02658850900"} Nov 28 09:57:39 crc kubenswrapper[4838]: I1128 09:57:39.748625 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b"} Nov 28 09:57:39 crc kubenswrapper[4838]: I1128 09:57:39.748578 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:39 crc kubenswrapper[4838]: I1128 09:57:39.749831 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:39 crc kubenswrapper[4838]: I1128 09:57:39.750046 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:39 crc kubenswrapper[4838]: I1128 09:57:39.750064 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:39 crc kubenswrapper[4838]: I1128 09:57:39.750882 4838 scope.go:117] "RemoveContainer" containerID="b70728b7510ae583d35dd0047e99f5221eb6225b5f935ba40e01f02658850900" Nov 28 09:57:40 crc kubenswrapper[4838]: I1128 09:57:40.755096 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 28 09:57:40 crc kubenswrapper[4838]: I1128 09:57:40.757375 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver/0.log" Nov 28 09:57:40 crc kubenswrapper[4838]: I1128 09:57:40.758004 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4"} Nov 28 09:57:40 crc kubenswrapper[4838]: I1128 09:57:40.758221 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:40 crc kubenswrapper[4838]: I1128 09:57:40.759501 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:40 crc kubenswrapper[4838]: I1128 09:57:40.759539 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:40 crc kubenswrapper[4838]: I1128 09:57:40.759558 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:41 crc kubenswrapper[4838]: I1128 09:57:41.572163 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:41 crc kubenswrapper[4838]: I1128 09:57:41.574105 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:41 crc kubenswrapper[4838]: I1128 
09:57:41.574178 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:41 crc kubenswrapper[4838]: I1128 09:57:41.574199 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:41 crc kubenswrapper[4838]: I1128 09:57:41.574257 4838 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 09:57:41 crc kubenswrapper[4838]: I1128 09:57:41.760667 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:41 crc kubenswrapper[4838]: I1128 09:57:41.761984 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 09:57:41 crc kubenswrapper[4838]: I1128 09:57:41.767418 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:41 crc kubenswrapper[4838]: I1128 09:57:41.767468 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:41 crc kubenswrapper[4838]: I1128 09:57:41.767481 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:42 crc kubenswrapper[4838]: I1128 09:57:42.763439 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:42 crc kubenswrapper[4838]: I1128 09:57:42.764815 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:42 crc kubenswrapper[4838]: I1128 09:57:42.764866 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:42 crc kubenswrapper[4838]: I1128 09:57:42.764879 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:43 crc kubenswrapper[4838]: I1128 09:57:43.972210 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 09:57:43 crc kubenswrapper[4838]: I1128 09:57:43.972463 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:43 crc kubenswrapper[4838]: I1128 09:57:43.974555 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:43 crc kubenswrapper[4838]: I1128 09:57:43.974618 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:43 crc kubenswrapper[4838]: I1128 09:57:43.974631 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:43 crc kubenswrapper[4838]: I1128 09:57:43.982336 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 09:57:44 crc kubenswrapper[4838]: I1128 09:57:44.264011 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 09:57:44 crc kubenswrapper[4838]: I1128 09:57:44.271303 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 09:57:44 crc kubenswrapper[4838]: I1128 09:57:44.769122 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller 
attach/detach" Nov 28 09:57:44 crc kubenswrapper[4838]: I1128 09:57:44.770992 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:44 crc kubenswrapper[4838]: I1128 09:57:44.771056 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:44 crc kubenswrapper[4838]: I1128 09:57:44.771078 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:44 crc kubenswrapper[4838]: I1128 09:57:44.991039 4838 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 09:57:44 crc kubenswrapper[4838]: I1128 09:57:44.991911 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 09:57:44 crc kubenswrapper[4838]: I1128 09:57:44.992052 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 09:57:44 crc kubenswrapper[4838]: I1128 09:57:44.992299 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:44 crc kubenswrapper[4838]: I1128 09:57:44.993905 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:44 crc kubenswrapper[4838]: I1128 09:57:44.993956 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:44 crc kubenswrapper[4838]: I1128 09:57:44.993974 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:44 crc kubenswrapper[4838]: I1128 09:57:44.994690 4838 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="cluster-policy-controller" containerStatusID={"Type":"cri-o","ID":"426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container cluster-policy-controller failed startup probe, will be restarted" Nov 28 09:57:44 crc kubenswrapper[4838]: I1128 09:57:44.995002 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" containerID="cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f" gracePeriod=30 Nov 28 09:57:45 crc kubenswrapper[4838]: I1128 09:57:45.772448 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:45 crc kubenswrapper[4838]: I1128 09:57:45.774148 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:45 crc kubenswrapper[4838]: I1128 09:57:45.774185 4838 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:45 crc kubenswrapper[4838]: I1128 09:57:45.774198 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.199115 4838 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.759295 4838 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.759967 4838 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 28 09:57:46 crc kubenswrapper[4838]: E1128 09:57:46.760016 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": node \"crc\" not found" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.766162 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.766238 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.766263 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.766297 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.766324 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:46Z","lastTransitionTime":"2025-11-28T09:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.783169 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/cluster-policy-controller/0.log" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.783917 4838 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f" exitCode=255 Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.784010 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f"} Nov 28 09:57:46 crc kubenswrapper[4838]: E1128 09:57:46.790968 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.803209 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.803627 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.803921 4838 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.804100 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.804255 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:46Z","lastTransitionTime":"2025-11-28T09:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:46 crc kubenswrapper[4838]: E1128 09:57:46.816701 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.826989 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.827073 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.827101 4838 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.827135 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.827159 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:46Z","lastTransitionTime":"2025-11-28T09:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:46 crc kubenswrapper[4838]: E1128 09:57:46.846872 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.856621 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.856683 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.856704 4838 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.856763 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:46 crc kubenswrapper[4838]: I1128 09:57:46.856785 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:46Z","lastTransitionTime":"2025-11-28T09:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:46 crc kubenswrapper[4838]: E1128 09:57:46.873498 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[...image list identical to the previous status-patch attempt above...],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:46 crc kubenswrapper[4838]: E1128 09:57:46.873757 4838 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 09:57:46 crc kubenswrapper[4838]: E1128 09:57:46.873803 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:46 crc kubenswrapper[4838]: E1128 09:57:46.974029 4838
kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:47 crc kubenswrapper[4838]: E1128 09:57:47.074835 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:47 crc kubenswrapper[4838]: E1128 09:57:47.175865 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:47 crc kubenswrapper[4838]: E1128 09:57:47.277075 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:47 crc kubenswrapper[4838]: E1128 09:57:47.377376 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:47 crc kubenswrapper[4838]: E1128 09:57:47.478519 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:47 crc kubenswrapper[4838]: E1128 09:57:47.578766 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:47 crc kubenswrapper[4838]: E1128 09:57:47.679696 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:47 crc kubenswrapper[4838]: E1128 09:57:47.780428 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:47 crc kubenswrapper[4838]: E1128 09:57:47.881951 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:47 crc kubenswrapper[4838]: E1128 09:57:47.983223 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:48 crc kubenswrapper[4838]: E1128 09:57:48.084038 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:48 crc kubenswrapper[4838]: E1128 09:57:48.184884 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:48 crc kubenswrapper[4838]: E1128 09:57:48.285863 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:48 crc kubenswrapper[4838]: E1128 09:57:48.386332 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:48 crc kubenswrapper[4838]: E1128 09:57:48.487365 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:48 crc kubenswrapper[4838]: E1128 09:57:48.587804 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:48 crc kubenswrapper[4838]: E1128 09:57:48.637966 4838 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.660414 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.660862 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.662580 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:48 crc 
kubenswrapper[4838]: I1128 09:57:48.662688 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.662789 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.681840 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 28 09:57:48 crc kubenswrapper[4838]: E1128 09:57:48.688741 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:48 crc kubenswrapper[4838]: E1128 09:57:48.788852 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.795025 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/cluster-policy-controller/0.log" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.796159 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42"} Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.796228 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.797688 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.797746 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.797761 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.800389 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.801489 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.803627 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver/0.log" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.804198 4838 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4" exitCode=255 Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.804234 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4"} Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.804305 4838 scope.go:117] "RemoveContainer" 
containerID="b70728b7510ae583d35dd0047e99f5221eb6225b5f935ba40e01f02658850900" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.804341 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.804419 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.806793 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.806842 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.806858 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.807081 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.807121 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.807135 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:48 crc kubenswrapper[4838]: I1128 09:57:48.809771 4838 scope.go:117] "RemoveContainer" containerID="2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4" Nov 28 09:57:48 crc kubenswrapper[4838]: E1128 09:57:48.810551 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 28 09:57:48 crc kubenswrapper[4838]: E1128 09:57:48.889275 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:48 crc kubenswrapper[4838]: E1128 09:57:48.989892 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:49 crc kubenswrapper[4838]: E1128 09:57:49.090852 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:49 crc kubenswrapper[4838]: E1128 09:57:49.191552 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:49 crc kubenswrapper[4838]: E1128 09:57:49.292244 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:49 crc kubenswrapper[4838]: I1128 09:57:49.333833 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 09:57:49 crc kubenswrapper[4838]: E1128 09:57:49.392661 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:49 crc kubenswrapper[4838]: E1128 09:57:49.493702 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:49 crc kubenswrapper[4838]: E1128 09:57:49.593802 4838 
kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:49 crc kubenswrapper[4838]: E1128 09:57:49.694289 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:49 crc kubenswrapper[4838]: E1128 09:57:49.795014 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:49 crc kubenswrapper[4838]: I1128 09:57:49.809146 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 28 09:57:49 crc kubenswrapper[4838]: I1128 09:57:49.810998 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver/0.log" Nov 28 09:57:49 crc kubenswrapper[4838]: I1128 09:57:49.811562 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:49 crc kubenswrapper[4838]: I1128 09:57:49.812372 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:49 crc kubenswrapper[4838]: I1128 09:57:49.812405 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:49 crc kubenswrapper[4838]: I1128 09:57:49.812415 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:49 crc kubenswrapper[4838]: E1128 09:57:49.895491 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:49 crc kubenswrapper[4838]: E1128 09:57:49.996306 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:50 crc kubenswrapper[4838]: E1128 09:57:50.096913 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:50 crc kubenswrapper[4838]: E1128 09:57:50.197675 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:50 crc kubenswrapper[4838]: E1128 09:57:50.299010 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:50 crc kubenswrapper[4838]: E1128 09:57:50.399688 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:50 crc kubenswrapper[4838]: E1128 09:57:50.500533 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:50 crc kubenswrapper[4838]: E1128 09:57:50.600739 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:50 crc kubenswrapper[4838]: E1128 09:57:50.701706 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:50 crc kubenswrapper[4838]: E1128 09:57:50.802372 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:50 crc kubenswrapper[4838]: I1128 09:57:50.814706 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:50 crc kubenswrapper[4838]: I1128 09:57:50.815498 4838 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:50 crc kubenswrapper[4838]: I1128 09:57:50.815544 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:50 crc kubenswrapper[4838]: I1128 09:57:50.815559 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:50 crc kubenswrapper[4838]: E1128 09:57:50.902638 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:51 crc kubenswrapper[4838]: E1128 09:57:51.002946 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:51 crc kubenswrapper[4838]: E1128 09:57:51.103507 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:51 crc kubenswrapper[4838]: E1128 09:57:51.204092 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:51 crc kubenswrapper[4838]: E1128 09:57:51.305067 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:51 crc kubenswrapper[4838]: E1128 09:57:51.405256 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:51 crc kubenswrapper[4838]: E1128 09:57:51.512801 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:51 crc kubenswrapper[4838]: E1128 09:57:51.613167 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:51 crc kubenswrapper[4838]: E1128 09:57:51.713792 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:51 crc kubenswrapper[4838]: E1128 09:57:51.814769 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:51 crc kubenswrapper[4838]: E1128 09:57:51.915664 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:51 crc kubenswrapper[4838]: I1128 09:57:51.990559 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 09:57:51 crc kubenswrapper[4838]: I1128 09:57:51.991067 4838 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 09:57:51 crc kubenswrapper[4838]: I1128 09:57:51.992920 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:51 crc kubenswrapper[4838]: I1128 09:57:51.992996 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:51 crc kubenswrapper[4838]: I1128 09:57:51.993026 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:51 crc kubenswrapper[4838]: I1128 09:57:51.995184 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 09:57:52 crc kubenswrapper[4838]: E1128 09:57:52.015841 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" 
Nov 28 09:57:52 crc kubenswrapper[4838]: I1128 09:57:52.019397 4838 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 28 09:57:52 crc kubenswrapper[4838]: E1128 09:57:52.116770 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:52 crc kubenswrapper[4838]: E1128 09:57:52.217784 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:52 crc kubenswrapper[4838]: E1128 09:57:52.318608 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:52 crc kubenswrapper[4838]: E1128 09:57:52.419354 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:52 crc kubenswrapper[4838]: E1128 09:57:52.519865 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:52 crc kubenswrapper[4838]: E1128 09:57:52.620796 4838 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 09:57:52 crc kubenswrapper[4838]: I1128 09:57:52.655431 4838 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 28 09:57:52 crc kubenswrapper[4838]: I1128 09:57:52.727165 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:52 crc kubenswrapper[4838]: I1128 09:57:52.727210 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:52 crc kubenswrapper[4838]: I1128 09:57:52.727221 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:52 crc kubenswrapper[4838]: I1128 09:57:52.727241 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:52 crc kubenswrapper[4838]: I1128 09:57:52.727252 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:52Z","lastTransitionTime":"2025-11-28T09:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:52 crc kubenswrapper[4838]: I1128 09:57:52.830331 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:52 crc kubenswrapper[4838]: I1128 09:57:52.830402 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:52 crc kubenswrapper[4838]: I1128 09:57:52.830429 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:52 crc kubenswrapper[4838]: I1128 09:57:52.830463 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:52 crc kubenswrapper[4838]: I1128 09:57:52.830488 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:52Z","lastTransitionTime":"2025-11-28T09:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:52 crc kubenswrapper[4838]: I1128 09:57:52.932745 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:52 crc kubenswrapper[4838]: I1128 09:57:52.932788 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:52 crc kubenswrapper[4838]: I1128 09:57:52.932800 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:52 crc kubenswrapper[4838]: I1128 09:57:52.932818 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:52 crc kubenswrapper[4838]: I1128 09:57:52.932830 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:52Z","lastTransitionTime":"2025-11-28T09:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.036197 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.036234 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.036244 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.036261 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.036274 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:53Z","lastTransitionTime":"2025-11-28T09:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.138417 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.138453 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.138466 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.138484 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.138496 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:53Z","lastTransitionTime":"2025-11-28T09:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.241648 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.241711 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.241761 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.241786 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.241805 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:53Z","lastTransitionTime":"2025-11-28T09:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.345156 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.345250 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.345276 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.345311 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.345341 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:53Z","lastTransitionTime":"2025-11-28T09:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.448159 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.448272 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.448291 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.448318 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.448340 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:53Z","lastTransitionTime":"2025-11-28T09:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.514360 4838 apiserver.go:52] "Watching apiserver" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.518145 4838 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.518842 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-4rv9b","openshift-multus/multus-additional-cni-plugins-58mh7","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-ovn-kubernetes/ovnkube-node-gmhsj","openshift-dns/node-resolver-sft2b","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-machine-config-operator/machine-config-daemon-5dxdd","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf"] Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.519363 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.519684 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.520910 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.521390 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.523135 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.524898 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.524893 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.525250 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.525618 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.525665 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.525739 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.526375 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.526857 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-58mh7" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.527708 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.527994 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.528162 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.528231 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.528199 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.528192 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-sft2b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.528257 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.528459 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.530162 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.530360 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.532589 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.532847 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.534197 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.535493 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.535536 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.535769 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.535961 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.536073 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.536264 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.536502 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.536687 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.536972 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.537367 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.537482 4838 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.538796 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.539175 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.539587 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.540098 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.540132 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.540459 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.541655 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.542132 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.543868 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.551550 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.551584 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.551593 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.551607 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.551619 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:53Z","lastTransitionTime":"2025-11-28T09:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.557118 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.569035 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.581619 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.596236 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.599961 4838 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.617364 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.627645 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.641458 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.646235 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.646313 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.646362 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.646395 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.646431 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.646466 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod 
\"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.646499 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.646534 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.646587 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.646622 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.646657 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.646691 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.646759 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.646794 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.646826 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.646858 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.646893 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.646931 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.646965 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.646997 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647033 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647067 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647102 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647135 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647169 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647190 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647207 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647242 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647276 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647315 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647323 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647577 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647625 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647664 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647702 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: 
\"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647759 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647785 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647797 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647835 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647838 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647887 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647908 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647942 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647908 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647967 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.648044 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.648085 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.648130 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.648164 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.647977 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.648244 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.648297 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.650024 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.648364 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.650090 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.648448 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.648493 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.650137 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.650177 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.650215 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.650174 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.650245 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.650281 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.650313 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.650340 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.650413 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.650669 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.650694 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.650828 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.651008 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.651253 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.651356 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.651448 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.651471 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.651555 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.651579 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.651673 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.651892 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.653270 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.653815 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.654040 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.654041 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.654338 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.654432 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.654548 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.654834 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.655320 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.656059 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). 
InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.656110 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.653907 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.656781 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.656988 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.657028 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.657050 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.657087 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.657107 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:53Z","lastTransitionTime":"2025-11-28T09:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.657315 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.657435 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.657674 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.657905 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"nam
e\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.658367 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.650371 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.658552 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.658681 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.658745 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.658998 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.659096 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.659250 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.659351 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.659385 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.659460 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.659496 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.659528 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.659401 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.659539 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.659654 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.659678 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.659877 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.659917 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.659940 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.659986 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.660011 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.659890 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.660039 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.660035 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.660125 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.660138 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.660173 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.660249 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661024 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661086 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661129 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661153 4838 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661184 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661208 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661231 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661256 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661281 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661313 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661341 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661372 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661400 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661423 4838 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661444 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661474 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661496 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661516 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661543 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661569 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661590 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661613 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661635 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 
09:57:53.661656 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661676 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661708 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661752 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661774 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661795 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661815 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661833 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661853 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661881 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " 
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661904 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661930 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661953 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661976 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662000 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662024 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662048 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662071 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662091 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662121 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod 
\"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662144 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662162 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662187 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662208 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662227 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662251 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662283 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662302 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662324 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662342 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662361 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662381 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662401 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662483 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662504 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662531 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662553 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662571 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662595 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662616 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662662 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662683 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662706 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662747 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662768 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662791 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662815 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662837 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662858 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662884 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662906 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662931 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662960 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662982 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663000 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663020 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663042 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663066 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663090 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 09:57:53 crc kubenswrapper[4838]: 
I1128 09:57:53.663111 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663132 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663151 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663174 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663196 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663218 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663241 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663265 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663289 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663307 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663331 4838 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663352 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663372 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663394 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663415 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663466 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663485 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663507 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663527 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663543 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: 
\"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663570 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663600 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663620 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663642 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663665 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663687 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.660478 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663708 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663747 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.660775 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663771 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661093 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663792 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661281 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663814 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661458 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661458 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661527 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661750 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661834 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661920 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.661913 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662906 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662908 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663178 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663290 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663682 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663818 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.663977 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.664121 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 09:57:54.164031663 +0000 UTC m=+45.863005853 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.664484 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.664587 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.664676 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.664807 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.665015 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.665095 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.665244 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.665294 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.665600 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.665944 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.664011 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.666119 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.666165 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.666190 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.666197 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.666213 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.666242 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.666268 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.666291 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.666339 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.666362 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.666513 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.666691 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.667561 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.667750 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d-cni-binary-copy\") pod \"multus-additional-cni-plugins-58mh7\" (UID: \"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\") " pod="openshift-multus/multus-additional-cni-plugins-58mh7" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.667784 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-cni-bin\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.667790 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.667818 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.667846 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-host-run-netns\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.667883 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.668081 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.668282 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.668520 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.668792 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.669007 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.669177 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.669440 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.669480 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.669767 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.669872 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.670090 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.670115 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.670333 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.670777 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.671246 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.671281 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.671391 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.662237 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.669689 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.664298 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.671702 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.671860 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.672114 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/051f7e1c-2d47-4be9-bbd5-14feec16eb16-multus-daemon-config\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.672297 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.672308 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.672477 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/41b01f7d-5c75-49de-86f7-87e04bf71194-env-overrides\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.672545 4838 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.672860 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.673039 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.673094 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.673328 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 09:57:54.173270477 +0000 UTC m=+45.872244667 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.672649 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d-os-release\") pod \"multus-additional-cni-plugins-58mh7\" (UID: \"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\") " pod="openshift-multus/multus-additional-cni-plugins-58mh7" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.673665 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.673702 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dmrk\" (UniqueName: \"kubernetes.io/projected/5c3daa53-8c4e-4e30-aeba-146602dd45cd-kube-api-access-9dmrk\") pod \"machine-config-daemon-5dxdd\" (UID: \"5c3daa53-8c4e-4e30-aeba-146602dd45cd\") " pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.673835 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-hostroot\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.673875 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.673916 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-slash\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.673961 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5c3daa53-8c4e-4e30-aeba-146602dd45cd-proxy-tls\") pod \"machine-config-daemon-5dxdd\" (UID: \"5c3daa53-8c4e-4e30-aeba-146602dd45cd\") " pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674173 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: 
\"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-multus-socket-dir-parent\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674214 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-host-run-k8s-cni-cncf-io\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674256 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-var-lib-openvswitch\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674311 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674357 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674414 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d-cnibin\") pod \"multus-additional-cni-plugins-58mh7\" (UID: \"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\") " pod="openshift-multus/multus-additional-cni-plugins-58mh7" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674456 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-systemd-units\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674497 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-system-cni-dir\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674535 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-kubelet\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674574 
4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-run-systemd\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674625 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674677 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/5c3daa53-8c4e-4e30-aeba-146602dd45cd-rootfs\") pod \"machine-config-daemon-5dxdd\" (UID: \"5c3daa53-8c4e-4e30-aeba-146602dd45cd\") " pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674749 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-run-ovn-kubernetes\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674792 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-cni-netd\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674824 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/41b01f7d-5c75-49de-86f7-87e04bf71194-ovnkube-config\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674864 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svlft\" (UniqueName: \"kubernetes.io/projected/41b01f7d-5c75-49de-86f7-87e04bf71194-kube-api-access-svlft\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674903 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-etc-kubernetes\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674943 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/200cdb91-cc86-40be-a5b6-30f7b9beba6d-hosts-file\") pod \"node-resolver-sft2b\" (UID: \"200cdb91-cc86-40be-a5b6-30f7b9beba6d\") " 
pod="openshift-dns/node-resolver-sft2b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674983 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-run-netns\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.675026 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-host-run-multus-certs\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.675076 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szqtp\" (UniqueName: \"kubernetes.io/projected/051f7e1c-2d47-4be9-bbd5-14feec16eb16-kube-api-access-szqtp\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.675131 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgs5r\" (UniqueName: \"kubernetes.io/projected/3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d-kube-api-access-jgs5r\") pod \"multus-additional-cni-plugins-58mh7\" (UID: \"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\") " pod="openshift-multus/multus-additional-cni-plugins-58mh7" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.675183 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.675223 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/41b01f7d-5c75-49de-86f7-87e04bf71194-ovn-node-metrics-cert\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.675266 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-host-var-lib-cni-multus\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.675309 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-host-var-lib-kubelet\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.675352 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-etc-openvswitch\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.675390 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.675441 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.675686 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d-tuning-conf-dir\") pod \"multus-additional-cni-plugins-58mh7\" (UID: \"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\") " pod="openshift-multus/multus-additional-cni-plugins-58mh7" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.673763 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674088 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674162 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.675780 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5c3daa53-8c4e-4e30-aeba-146602dd45cd-mcd-auth-proxy-config\") pod \"machine-config-daemon-5dxdd\" (UID: \"5c3daa53-8c4e-4e30-aeba-146602dd45cd\") " pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.675831 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-multus-conf-dir\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.675872 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.675918 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-node-log\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.675958 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/41b01f7d-5c75-49de-86f7-87e04bf71194-ovnkube-script-lib\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.676002 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d-system-cni-dir\") pod \"multus-additional-cni-plugins-58mh7\" (UID: \"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\") " pod="openshift-multus/multus-additional-cni-plugins-58mh7" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.676041 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-58mh7\" (UID: \"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\") " pod="openshift-multus/multus-additional-cni-plugins-58mh7" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.676087 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.676130 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-multus-cni-dir\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.676173 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpt6k\" (UniqueName: \"kubernetes.io/projected/200cdb91-cc86-40be-a5b6-30f7b9beba6d-kube-api-access-lpt6k\") pod \"node-resolver-sft2b\" (UID: \"200cdb91-cc86-40be-a5b6-30f7b9beba6d\") " pod="openshift-dns/node-resolver-sft2b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.676260 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.677437 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/051f7e1c-2d47-4be9-bbd5-14feec16eb16-cni-binary-copy\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.677531 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-host-var-lib-cni-bin\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.677647 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-log-socket\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.677688 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-os-release\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.677737 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-run-ovn\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.677791 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.677829 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.677858 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.677891 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.677927 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-cnibin\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.677952 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-run-openvswitch\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678107 4838 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678138 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678156 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678168 4838 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678183 4838 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678194 4838 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678207 4838 
reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678221 4838 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678235 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678245 4838 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678257 4838 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678268 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678282 4838 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678293 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678303 4838 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678313 4838 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678325 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678337 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678347 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc 
kubenswrapper[4838]: I1128 09:57:53.678359 4838 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678370 4838 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678380 4838 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678391 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678404 4838 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678415 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678425 4838 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678437 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678452 4838 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678465 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678476 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678489 4838 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678501 4838 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath 
\"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678512 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678528 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678541 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678553 4838 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678563 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678573 4838 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678586 4838 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678597 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679132 4838 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679153 4838 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679168 4838 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679178 4838 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679188 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node 
\"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679202 4838 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679212 4838 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679221 4838 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679232 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679244 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679255 4838 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679266 4838 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679275 4838 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679287 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679460 4838 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679476 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679489 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679501 4838 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" 
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679510 4838 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679520 4838 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679536 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679547 4838 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679559 4838 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679573 4838 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679586 4838 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679597 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679609 4838 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679621 4838 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679634 4838 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679644 4838 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679654 4838 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc 
kubenswrapper[4838]: I1128 09:57:53.679666 4838 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679677 4838 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679693 4838 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679703 4838 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679745 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679758 4838 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679771 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679782 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679794 4838 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679806 4838 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679818 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679833 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679843 4838 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: 
I1128 09:57:53.679856 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679867 4838 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679880 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679893 4838 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679903 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679913 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678795 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.681424 4838 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.683437 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.683472 4838 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.683547 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.683584 4838 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.683750 4838 reconciler_common.go:293] 
"Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.683817 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.683836 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.683854 4838 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674198 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.684002 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.682902 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674589 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.674927 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.675019 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.675190 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.675346 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.675522 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.675569 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.672646 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.676468 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.676821 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.676867 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.676951 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.677061 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.677160 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.677371 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.677824 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.677832 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.677847 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678221 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.684761 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678424 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678433 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678775 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.678499 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679322 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679336 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679754 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679854 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.680614 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.680669 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.680074 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.681160 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.681598 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.679656 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.681915 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.682672 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.682935 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.683018 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.683050 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.683342 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.683374 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.683535 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.683854 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.684666 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.684913 4838 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.685107 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.685126 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.685154 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.684155 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.687469 4838 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.687617 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.687703 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.687749 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 09:57:54.18766887 +0000 UTC m=+45.886643080 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.688005 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.688379 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.688837 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.689295 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.689312 4838 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.689345 4838 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.689376 4838 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.689392 4838 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.689409 4838 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.689424 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.689444 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.689458 4838 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc 
kubenswrapper[4838]: I1128 09:57:53.689472 4838 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.689485 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.689503 4838 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.689518 4838 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.689552 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.689568 4838 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.689574 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.689584 4838 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.689633 4838 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.689700 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.689916 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.690197 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.690352 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.690659 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.701668 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.701882 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.702608 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.703087 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.703150 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.703468 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.703504 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.703733 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.703770 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.703785 4838 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.703841 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 09:57:54.203823348 +0000 UTC m=+45.902797598 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.704172 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.704606 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.704821 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.706692 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.708249 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.708496 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.709918 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.709955 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.709971 4838 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.710034 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 09:57:54.210013195 +0000 UTC m=+45.908987375 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.713227 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.713346 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.713370 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.713407 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.714249 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.714569 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.716205 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.716550 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"
started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.726130 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.726366 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.733039 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.737863 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.742211 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32f
a41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"lo
g-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip
\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.761318 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.761382 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.761407 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.761437 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.761457 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:53Z","lastTransitionTime":"2025-11-28T09:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.802645 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-os-release\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.802736 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/051f7e1c-2d47-4be9-bbd5-14feec16eb16-cni-binary-copy\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.802769 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-host-var-lib-cni-bin\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.802812 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-log-socket\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.802834 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-cnibin\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.802856 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-run-openvswitch\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.802875 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-run-ovn\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.802898 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d-cni-binary-copy\") pod \"multus-additional-cni-plugins-58mh7\" (UID: \"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\") " pod="openshift-multus/multus-additional-cni-plugins-58mh7"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.802921 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-cni-bin\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.802943 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d-os-release\") pod \"multus-additional-cni-plugins-58mh7\" (UID: \"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\") " pod="openshift-multus/multus-additional-cni-plugins-58mh7"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.802936 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-os-release\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.802965 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dmrk\" (UniqueName: \"kubernetes.io/projected/5c3daa53-8c4e-4e30-aeba-146602dd45cd-kube-api-access-9dmrk\") pod \"machine-config-daemon-5dxdd\" (UID: \"5c3daa53-8c4e-4e30-aeba-146602dd45cd\") " pod="openshift-machine-config-operator/machine-config-daemon-5dxdd"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803034 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-host-run-netns\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803075 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/051f7e1c-2d47-4be9-bbd5-14feec16eb16-multus-daemon-config\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803107 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/41b01f7d-5c75-49de-86f7-87e04bf71194-env-overrides\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803197 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5c3daa53-8c4e-4e30-aeba-146602dd45cd-proxy-tls\") pod \"machine-config-daemon-5dxdd\" (UID: \"5c3daa53-8c4e-4e30-aeba-146602dd45cd\") " pod="openshift-machine-config-operator/machine-config-daemon-5dxdd"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803231 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-multus-socket-dir-parent\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803265 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-host-run-k8s-cni-cncf-io\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803306 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-hostroot\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803350 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803384 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-slash\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803418 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-var-lib-openvswitch\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803454 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d-cnibin\") pod \"multus-additional-cni-plugins-58mh7\" (UID: \"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\") " pod="openshift-multus/multus-additional-cni-plugins-58mh7"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803487 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-systemd-units\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803520 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803560 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-system-cni-dir\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803594 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-kubelet\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803629 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-run-systemd\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj"
\"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803663 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/5c3daa53-8c4e-4e30-aeba-146602dd45cd-rootfs\") pod \"machine-config-daemon-5dxdd\" (UID: \"5c3daa53-8c4e-4e30-aeba-146602dd45cd\") " pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803694 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-run-ovn-kubernetes\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803778 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-etc-kubernetes\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803814 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/200cdb91-cc86-40be-a5b6-30f7b9beba6d-hosts-file\") pod \"node-resolver-sft2b\" (UID: \"200cdb91-cc86-40be-a5b6-30f7b9beba6d\") " pod="openshift-dns/node-resolver-sft2b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803843 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-run-netns\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803874 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-cni-netd\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803903 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/41b01f7d-5c75-49de-86f7-87e04bf71194-ovnkube-config\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803945 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svlft\" (UniqueName: \"kubernetes.io/projected/41b01f7d-5c75-49de-86f7-87e04bf71194-kube-api-access-svlft\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.803975 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-host-run-multus-certs\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " 
pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.804007 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szqtp\" (UniqueName: \"kubernetes.io/projected/051f7e1c-2d47-4be9-bbd5-14feec16eb16-kube-api-access-szqtp\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.804040 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgs5r\" (UniqueName: \"kubernetes.io/projected/3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d-kube-api-access-jgs5r\") pod \"multus-additional-cni-plugins-58mh7\" (UID: \"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\") " pod="openshift-multus/multus-additional-cni-plugins-58mh7" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.804127 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-host-var-lib-cni-bin\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.804214 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-log-socket\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.804289 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-cnibin\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.804352 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-run-openvswitch\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.804399 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-run-ovn\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.804964 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-host-run-netns\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805065 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-cni-bin\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805157 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: 
\"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-kubelet\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805281 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d-os-release\") pod \"multus-additional-cni-plugins-58mh7\" (UID: \"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\") " pod="openshift-multus/multus-additional-cni-plugins-58mh7" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805352 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-run-netns\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805386 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-slash\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805464 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-var-lib-openvswitch\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805491 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-run-systemd\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805502 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/5c3daa53-8c4e-4e30-aeba-146602dd45cd-rootfs\") pod \"machine-config-daemon-5dxdd\" (UID: \"5c3daa53-8c4e-4e30-aeba-146602dd45cd\") " pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805562 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-run-ovn-kubernetes\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805565 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-hostroot\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805597 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-cni-netd\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805617 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805659 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-host-run-k8s-cni-cncf-io\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805643 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-host-var-lib-cni-multus\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805706 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-host-var-lib-cni-multus\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805780 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-host-var-lib-kubelet\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805804 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-host-run-multus-certs\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805813 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/41b01f7d-5c75-49de-86f7-87e04bf71194-ovn-node-metrics-cert\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805440 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d-cnibin\") pod \"multus-additional-cni-plugins-58mh7\" (UID: \"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\") " pod="openshift-multus/multus-additional-cni-plugins-58mh7" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805843 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d-tuning-conf-dir\") pod \"multus-additional-cni-plugins-58mh7\" (UID: \"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\") " pod="openshift-multus/multus-additional-cni-plugins-58mh7" Nov 
28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805875 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5c3daa53-8c4e-4e30-aeba-146602dd45cd-mcd-auth-proxy-config\") pod \"machine-config-daemon-5dxdd\" (UID: \"5c3daa53-8c4e-4e30-aeba-146602dd45cd\") " pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805842 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-host-var-lib-kubelet\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805901 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-multus-conf-dir\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805928 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-multus-conf-dir\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805780 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-system-cni-dir\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805954 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-etc-openvswitch\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805812 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805980 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.806018 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-node-log\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.806022 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-etc-openvswitch\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.806040 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/41b01f7d-5c75-49de-86f7-87e04bf71194-ovnkube-script-lib\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.806063 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpt6k\" (UniqueName: \"kubernetes.io/projected/200cdb91-cc86-40be-a5b6-30f7b9beba6d-kube-api-access-lpt6k\") pod \"node-resolver-sft2b\" (UID: \"200cdb91-cc86-40be-a5b6-30f7b9beba6d\") " pod="openshift-dns/node-resolver-sft2b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.806090 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d-system-cni-dir\") pod \"multus-additional-cni-plugins-58mh7\" (UID: \"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\") " pod="openshift-multus/multus-additional-cni-plugins-58mh7" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.806101 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.806112 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-58mh7\" (UID: \"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\") " pod="openshift-multus/multus-additional-cni-plugins-58mh7" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.806150 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-multus-cni-dir\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.806153 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/200cdb91-cc86-40be-a5b6-30f7b9beba6d-hosts-file\") pod \"node-resolver-sft2b\" (UID: \"200cdb91-cc86-40be-a5b6-30f7b9beba6d\") " pod="openshift-dns/node-resolver-sft2b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.805370 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-systemd-units\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.806208 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: 
\"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-node-log\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.806353 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d-tuning-conf-dir\") pod \"multus-additional-cni-plugins-58mh7\" (UID: \"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\") " pod="openshift-multus/multus-additional-cni-plugins-58mh7" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.806396 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-multus-cni-dir\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.806385 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d-cni-binary-copy\") pod \"multus-additional-cni-plugins-58mh7\" (UID: \"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\") " pod="openshift-multus/multus-additional-cni-plugins-58mh7" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.806590 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/051f7e1c-2d47-4be9-bbd5-14feec16eb16-cni-binary-copy\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.806639 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-multus-socket-dir-parent\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.806963 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/41b01f7d-5c75-49de-86f7-87e04bf71194-ovnkube-config\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.807382 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5c3daa53-8c4e-4e30-aeba-146602dd45cd-mcd-auth-proxy-config\") pod \"machine-config-daemon-5dxdd\" (UID: \"5c3daa53-8c4e-4e30-aeba-146602dd45cd\") " pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.807666 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d-system-cni-dir\") pod \"multus-additional-cni-plugins-58mh7\" (UID: \"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\") " pod="openshift-multus/multus-additional-cni-plugins-58mh7" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.808084 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/41b01f7d-5c75-49de-86f7-87e04bf71194-ovnkube-script-lib\") 
pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.808321 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/051f7e1c-2d47-4be9-bbd5-14feec16eb16-etc-kubernetes\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.808544 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-58mh7\" (UID: \"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\") " pod="openshift-multus/multus-additional-cni-plugins-58mh7" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.809207 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.809298 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.809322 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.809342 4838 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.809391 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.809537 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.809556 4838 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.809577 4838 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.809594 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.809613 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: 
\"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.809632 4838 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.809652 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.809711 4838 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.809759 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.809776 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.809793 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.809812 4838 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.809829 4838 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.809846 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.809865 4838 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.809886 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810220 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810378 4838 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810402 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810419 4838 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810435 4838 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810452 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810470 4838 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810488 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810508 4838 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810528 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810544 4838 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810561 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810577 4838 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810593 4838 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810610 4838 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 28 
09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810628 4838 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810646 4838 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810701 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810745 4838 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810762 4838 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810779 4838 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810895 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810913 4838 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810981 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.811048 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.811067 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.811118 4838 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.811139 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" 
DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.811192 4838 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.811212 4838 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.811229 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.811246 4838 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.811262 4838 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.811279 4838 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.811295 4838 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.819801 4838 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.810945 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/41b01f7d-5c75-49de-86f7-87e04bf71194-env-overrides\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.819837 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.819933 4838 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.819960 4838 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.819979 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: 
\"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.819997 4838 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.820032 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.820054 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.820073 4838 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.820091 4838 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.820113 4838 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.820132 4838 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.820153 4838 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.820194 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.820212 4838 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.820228 4838 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.820249 4838 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.820269 4838 
reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.820287 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.820305 4838 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.820323 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.820340 4838 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.820358 4838 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.813815 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/051f7e1c-2d47-4be9-bbd5-14feec16eb16-multus-daemon-config\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.822837 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5c3daa53-8c4e-4e30-aeba-146602dd45cd-proxy-tls\") pod \"machine-config-daemon-5dxdd\" (UID: \"5c3daa53-8c4e-4e30-aeba-146602dd45cd\") " pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.823371 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/41b01f7d-5c75-49de-86f7-87e04bf71194-ovn-node-metrics-cert\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.826524 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgs5r\" (UniqueName: \"kubernetes.io/projected/3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d-kube-api-access-jgs5r\") pod \"multus-additional-cni-plugins-58mh7\" (UID: \"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\") " pod="openshift-multus/multus-additional-cni-plugins-58mh7" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.827278 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svlft\" (UniqueName: \"kubernetes.io/projected/41b01f7d-5c75-49de-86f7-87e04bf71194-kube-api-access-svlft\") pod \"ovnkube-node-gmhsj\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") " pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.828990 4838 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dmrk\" (UniqueName: \"kubernetes.io/projected/5c3daa53-8c4e-4e30-aeba-146602dd45cd-kube-api-access-9dmrk\") pod \"machine-config-daemon-5dxdd\" (UID: \"5c3daa53-8c4e-4e30-aeba-146602dd45cd\") " pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.832835 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szqtp\" (UniqueName: \"kubernetes.io/projected/051f7e1c-2d47-4be9-bbd5-14feec16eb16-kube-api-access-szqtp\") pod \"multus-4rv9b\" (UID: \"051f7e1c-2d47-4be9-bbd5-14feec16eb16\") " pod="openshift-multus/multus-4rv9b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.836550 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpt6k\" (UniqueName: \"kubernetes.io/projected/200cdb91-cc86-40be-a5b6-30f7b9beba6d-kube-api-access-lpt6k\") pod \"node-resolver-sft2b\" (UID: \"200cdb91-cc86-40be-a5b6-30f7b9beba6d\") " pod="openshift-dns/node-resolver-sft2b" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.861566 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.869233 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.869275 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.869285 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.869305 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.869350 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:53Z","lastTransitionTime":"2025-11-28T09:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.873385 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.886030 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 09:57:53 crc kubenswrapper[4838]: W1128 09:57:53.886412 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-d508fda60d2e79736db1f6ae125183cc098bc6f6a77000f8ccd8b629a31ed093 WatchSource:0}: Error finding container d508fda60d2e79736db1f6ae125183cc098bc6f6a77000f8ccd8b629a31ed093: Status 404 returned error can't find the container with id d508fda60d2e79736db1f6ae125183cc098bc6f6a77000f8ccd8b629a31ed093 Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.890244 4838 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 28 09:57:53 crc kubenswrapper[4838]: container &Container{Name:network-operator,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,Command:[/bin/bash -c #!/bin/bash Nov 28 09:57:53 crc kubenswrapper[4838]: set -o allexport Nov 28 09:57:53 crc kubenswrapper[4838]: if [[ -f /etc/kubernetes/apiserver-url.env ]]; then Nov 28 09:57:53 crc kubenswrapper[4838]: source /etc/kubernetes/apiserver-url.env Nov 28 09:57:53 crc kubenswrapper[4838]: else Nov 28 09:57:53 crc kubenswrapper[4838]: echo "Error: /etc/kubernetes/apiserver-url.env is missing" Nov 28 09:57:53 crc kubenswrapper[4838]: exit 1 Nov 28 09:57:53 crc kubenswrapper[4838]: fi Nov 28 09:57:53 crc kubenswrapper[4838]: exec /usr/bin/cluster-network-operator start --listen=0.0.0.0:9104 Nov 28 09:57:53 crc kubenswrapper[4838]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:cno,HostPort:9104,ContainerPort:9104,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:RELEASE_VERSION,Value:4.18.1,ValueFrom:nil,},EnvVar{Name:KUBE_PROXY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b97554198294bf544fbc116c94a0a1fb2ec8a4de0e926bf9d9e320135f0bee6f,ValueFrom:nil,},EnvVar{Name:KUBE_RBAC_PROXY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09,ValueFrom:nil,},EnvVar{Name:MULTUS_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26,ValueFrom:nil,},EnvVar{Name:MULTUS_ADMISSION_CONTROLLER_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317,ValueFrom:nil,},EnvVar{Name:CNI_PLUGINS_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc,ValueFrom:nil,},EnvVar{Name:BOND_CNI_PLUGIN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78,ValueFrom:nil,},EnvVar{Name:WHEREABOUTS_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4,ValueFrom:nil,},EnvVar{Name:ROUTE_OVERRRIDE_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa,ValueFrom:nil,},EnvVar{Name:MULTUS_NETWORKPOLICY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:23f833d3738d68706eb2f2868bd76bd71cee016cffa6faf5f045a60cc8c6eddd,ValueFrom:nil,},EnvVar{Name:OVN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,ValueFrom:nil,},EnvVar{Name:OVN
_NB_RAFT_ELECTION_TIMER,Value:10,ValueFrom:nil,},EnvVar{Name:OVN_SB_RAFT_ELECTION_TIMER,Value:16,ValueFrom:nil,},EnvVar{Name:OVN_NORTHD_PROBE_INTERVAL,Value:10000,ValueFrom:nil,},EnvVar{Name:OVN_CONTROLLER_INACTIVITY_PROBE,Value:180000,ValueFrom:nil,},EnvVar{Name:OVN_NB_INACTIVITY_PROBE,Value:60000,ValueFrom:nil,},EnvVar{Name:EGRESS_ROUTER_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c,ValueFrom:nil,},EnvVar{Name:NETWORK_METRICS_DAEMON_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d,ValueFrom:nil,},EnvVar{Name:NETWORK_CHECK_SOURCE_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:NETWORK_CHECK_TARGET_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:NETWORK_OPERATOR_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:CLOUD_NETWORK_CONFIG_CONTROLLER_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8048f1cb0be521f09749c0a489503cd56d85b68c6ca93380e082cfd693cd97a8,ValueFrom:nil,},EnvVar{Name:CLI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,ValueFrom:nil,},EnvVar{Name:FRR_K8S_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5dbf844e49bb46b78586930149e5e5f5dc121014c8afd10fe36f3651967cc256,ValueFrom:nil,},EnvVar{Name:NETWORKING_CONSOLE_PLUGIN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd,ValueFrom:nil,},EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:host-etc-kube,ReadOnly:true,MountPath:/etc/kubernetes,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-tls,ReadOnly:false,MountPath:/var/run/secrets/serving-cert,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rdwmf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-operator-58b4c7f79c-55gtf_openshift-network-operator(37a5e44f-9a88-4405-be8a-b645485e7312): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Nov 28 09:57:53 crc kubenswrapper[4838]: > logger="UnhandledError" Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.891524 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed 
to \"StartContainer\" for \"network-operator\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" podUID="37a5e44f-9a88-4405-be8a-b645485e7312" Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.893044 4838 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 28 09:57:53 crc kubenswrapper[4838]: container &Container{Name:webhook,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c set -xe Nov 28 09:57:53 crc kubenswrapper[4838]: if [[ -f "/env/_master" ]]; then Nov 28 09:57:53 crc kubenswrapper[4838]: set -o allexport Nov 28 09:57:53 crc kubenswrapper[4838]: source "/env/_master" Nov 28 09:57:53 crc kubenswrapper[4838]: set +o allexport Nov 28 09:57:53 crc kubenswrapper[4838]: fi Nov 28 09:57:53 crc kubenswrapper[4838]: # OVN-K will try to remove hybrid overlay node annotations even when the hybrid overlay is not enabled. Nov 28 09:57:53 crc kubenswrapper[4838]: # https://github.com/ovn-org/ovn-kubernetes/blob/ac6820df0b338a246f10f412cd5ec903bd234694/go-controller/pkg/ovn/master.go#L791 Nov 28 09:57:53 crc kubenswrapper[4838]: ho_enable="--enable-hybrid-overlay" Nov 28 09:57:53 crc kubenswrapper[4838]: echo "I$(date "+%m%d %H:%M:%S.%N") - network-node-identity - start webhook" Nov 28 09:57:53 crc kubenswrapper[4838]: # extra-allowed-user: service account `ovn-kubernetes-control-plane` Nov 28 09:57:53 crc kubenswrapper[4838]: # sets pod annotations in multi-homing layer3 network controller (cluster-manager) Nov 28 09:57:53 crc kubenswrapper[4838]: exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 \ Nov 28 09:57:53 crc kubenswrapper[4838]: --webhook-cert-dir="/etc/webhook-cert" \ Nov 28 09:57:53 crc kubenswrapper[4838]: --webhook-host=127.0.0.1 \ Nov 28 09:57:53 crc kubenswrapper[4838]: --webhook-port=9743 \ Nov 28 09:57:53 crc kubenswrapper[4838]: ${ho_enable} \ Nov 28 09:57:53 crc kubenswrapper[4838]: --enable-interconnect \ Nov 28 09:57:53 crc kubenswrapper[4838]: --disable-approver \ Nov 28 09:57:53 crc kubenswrapper[4838]: --extra-allowed-user="system:serviceaccount:openshift-ovn-kubernetes:ovn-kubernetes-control-plane" \ Nov 28 09:57:53 crc kubenswrapper[4838]: --wait-for-kubernetes-api=200s \ Nov 28 09:57:53 crc kubenswrapper[4838]: --pod-admission-conditions="/var/run/ovnkube-identity-config/additional-pod-admission-cond.json" \ Nov 28 09:57:53 crc kubenswrapper[4838]: --loglevel="${LOGLEVEL}" Nov 28 09:57:53 crc kubenswrapper[4838]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LOGLEVEL,Value:2,ValueFrom:nil,},EnvVar{Name:KUBERNETES_NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:webhook-cert,ReadOnly:false,MountPath:/etc/webhook-cert/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:env-overrides,ReadOnly:false,MountPath:/env,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovnkube-identity-cm,ReadOnly:false,MountPath:/var/run/ovnkube-identity-config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s2kz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000470000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-node-identity-vrzqb_openshift-network-node-identity(ef543e1b-8068-4ea3-b32a-61027b32e95d): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Nov 28 09:57:53 crc kubenswrapper[4838]: > logger="UnhandledError" Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.897085 4838 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 28 09:57:53 crc kubenswrapper[4838]: container &Container{Name:approver,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c set -xe Nov 28 09:57:53 crc kubenswrapper[4838]: if [[ -f "/env/_master" ]]; then Nov 28 09:57:53 crc kubenswrapper[4838]: set -o allexport Nov 28 09:57:53 crc kubenswrapper[4838]: source "/env/_master" Nov 28 09:57:53 crc kubenswrapper[4838]: set +o allexport Nov 28 09:57:53 crc kubenswrapper[4838]: fi Nov 28 09:57:53 crc kubenswrapper[4838]: Nov 28 09:57:53 crc kubenswrapper[4838]: echo "I$(date "+%m%d %H:%M:%S.%N") - network-node-identity - start approver" Nov 28 09:57:53 crc kubenswrapper[4838]: exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 \ Nov 28 09:57:53 crc kubenswrapper[4838]: --disable-webhook \ Nov 28 09:57:53 crc kubenswrapper[4838]: --csr-acceptance-conditions="/var/run/ovnkube-identity-config/additional-cert-acceptance-cond.json" \ Nov 28 09:57:53 crc kubenswrapper[4838]: --loglevel="${LOGLEVEL}" Nov 28 09:57:53 crc kubenswrapper[4838]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LOGLEVEL,Value:4,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:env-overrides,ReadOnly:false,MountPath:/env,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovnkube-identity-cm,ReadOnly:false,MountPath:/var/run/ovnkube-identity-config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s2kz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000470000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-node-identity-vrzqb_openshift-network-node-identity(ef543e1b-8068-4ea3-b32a-61027b32e95d): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Nov 28 09:57:53 crc kubenswrapper[4838]: > logger="UnhandledError" Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.899154 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"webhook\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\", failed to \"StartContainer\" for \"approver\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"]" pod="openshift-network-node-identity/network-node-identity-vrzqb" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.901800 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-58mh7" Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.907596 4838 util.go:30] "No sandbox for pod can be found. 
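Every CreateContainerConfigError above carries the same message: "services have not yet been read at least once, cannot construct envvars". Before starting any container the kubelet builds the legacy *_SERVICE_HOST/*_SERVICE_PORT environment variables from its local cache of Service objects, and immediately after a kubelet restart that cache has not completed its first list from the API server, so container creation is refused and retried. The condition is transient and clears on its own once the Service informer syncs. A minimal triage sketch in shell, assuming access to the node and a logged-in oc client:

# Count how often the kubelet hit the not-yet-synced Service cache (unit name as in this log):
journalctl -u kubelet --since "09:57" | grep -c "cannot construct envvars"
# The affected pods need no intervention; they start once the cache is warm:
oc get pods -n openshift-network-operator
oc get pods -n openshift-network-node-identity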
Nov 28 09:57:53 crc kubenswrapper[4838]: W1128 09:57:53.907770 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-7fbae0872f24d3d63fb64f6ae19fb61a045c927cc3dcb5cc6bd2f95c17302372 WatchSource:0}: Error finding container 7fbae0872f24d3d63fb64f6ae19fb61a045c927cc3dcb5cc6bd2f95c17302372: Status 404 returned error can't find the container with id 7fbae0872f24d3d63fb64f6ae19fb61a045c927cc3dcb5cc6bd2f95c17302372
Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.913076 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:iptables-alerter,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,Command:[/iptables-alerter/iptables-alerter.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONTAINER_RUNTIME_ENDPOINT,Value:unix:///run/crio/crio.sock,ValueFrom:nil,},EnvVar{Name:ALERTER_POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{68157440 0} {} 65Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:iptables-alerter-script,ReadOnly:false,MountPath:/iptables-alerter,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-slash,ReadOnly:true,MountPath:/host,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rczfb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod iptables-alerter-4ln5h_openshift-network-operator(d75a4c96-2883-4a0b-bab2-0fab2b6c0b49): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError"
Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.914281 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"iptables-alerter\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-network-operator/iptables-alerter-4ln5h" podUID="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.919875 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-sft2b"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.932675 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj"
Nov 28 09:57:53 crc kubenswrapper[4838]: W1128 09:57:53.933687 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod051f7e1c_2d47_4be9_bbd5_14feec16eb16.slice/crio-00a570b7f457c3e2d9db317ce9eb85785dce58f444ad4a4d33782f9f942666a0 WatchSource:0}: Error finding container 00a570b7f457c3e2d9db317ce9eb85785dce58f444ad4a4d33782f9f942666a0: Status 404 returned error can't find the container with id 00a570b7f457c3e2d9db317ce9eb85785dce58f444ad4a4d33782f9f942666a0
Nov 28 09:57:53 crc kubenswrapper[4838]: W1128 09:57:53.935214 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3f556bd7_3b15_4d7d_b8e2_4815bb5c9c7d.slice/crio-0fb1281d4b0e75ef23d7fc84b5fec38df3e6805b922cfc48728bc1242c7785ac WatchSource:0}: Error finding container 0fb1281d4b0e75ef23d7fc84b5fec38df3e6805b922cfc48728bc1242c7785ac: Status 404 returned error can't find the container with id 0fb1281d4b0e75ef23d7fc84b5fec38df3e6805b922cfc48728bc1242c7785ac
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.939090 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd"
Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.940849 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:egress-router-binary-copy,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c,Command:[/entrypoint/cnibincopy.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:RHEL8_SOURCE_DIRECTORY,Value:/usr/src/egress-router-cni/rhel8/bin/,ValueFrom:nil,},EnvVar{Name:RHEL9_SOURCE_DIRECTORY,Value:/usr/src/egress-router-cni/rhel9/bin/,ValueFrom:nil,},EnvVar{Name:DEFAULT_SOURCE_DIRECTORY,Value:/usr/src/egress-router-cni/bin/,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cni-binary-copy,ReadOnly:false,MountPath:/entrypoint,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cnibin,ReadOnly:false,MountPath:/host/opt/cni/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:os-release,ReadOnly:true,MountPath:/host/etc/os-release,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jgs5r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod multus-additional-cni-plugins-58mh7_openshift-multus(3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError"
Nov 28 09:57:53 crc kubenswrapper[4838]: W1128 09:57:53.941997 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod200cdb91_cc86_40be_a5b6_30f7b9beba6d.slice/crio-3da4fdddfcc4170aee286b7d330016d4a83c3b08fba2666b697bbf14081f2640 WatchSource:0}: Error finding container 3da4fdddfcc4170aee286b7d330016d4a83c3b08fba2666b697bbf14081f2640: Status 404 returned error can't find the container with id 3da4fdddfcc4170aee286b7d330016d4a83c3b08fba2666b697bbf14081f2640
Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.942039 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"egress-router-binary-copy\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-multus/multus-additional-cni-plugins-58mh7" podUID="3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d"
Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.942453 4838 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Nov 28 09:57:53 crc kubenswrapper[4838]: container &Container{Name:kube-multus,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26,Command:[/bin/bash -ec --],Args:[MULTUS_DAEMON_OPT=""
Nov 28 09:57:53 crc kubenswrapper[4838]: /entrypoint/cnibincopy.sh; exec /usr/src/multus-cni/bin/multus-daemon $MULTUS_DAEMON_OPT
Nov 28 09:57:53 crc kubenswrapper[4838]: ],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:RHEL8_SOURCE_DIRECTORY,Value:/usr/src/multus-cni/rhel8/bin/,ValueFrom:nil,},EnvVar{Name:RHEL9_SOURCE_DIRECTORY,Value:/usr/src/multus-cni/rhel9/bin/,ValueFrom:nil,},EnvVar{Name:DEFAULT_SOURCE_DIRECTORY,Value:/usr/src/multus-cni/bin/,ValueFrom:nil,},EnvVar{Name:KUBERNETES_SERVICE_PORT,Value:6443,ValueFrom:nil,},EnvVar{Name:KUBERNETES_SERVICE_HOST,Value:api-int.crc.testing,ValueFrom:nil,},EnvVar{Name:MULTUS_NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:K8S_NODE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{68157440 0} {} 65Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cni-binary-copy,ReadOnly:false,MountPath:/entrypoint,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:os-release,ReadOnly:false,MountPath:/host/etc/os-release,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:system-cni-dir,ReadOnly:false,MountPath:/host/etc/cni/net.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-cni-dir,ReadOnly:false,MountPath:/host/run/multus/cni/net.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cnibin,ReadOnly:false,MountPath:/host/opt/cni/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-socket-dir-parent,ReadOnly:false,MountPath:/host/run/multus,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-run-k8s-cni-cncf-io,ReadOnly:false,MountPath:/run/k8s.cni.cncf.io,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-run-netns,ReadOnly:false,MountPath:/run/netns,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-var-lib-cni-bin,ReadOnly:false,MountPath:/var/lib/cni/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-var-lib-cni-multus,ReadOnly:false,MountPath:/var/lib/cni/multus,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-var-lib-kubelet,ReadOnly:false,MountPath:/var/lib/kubelet,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:hostroot,ReadOnly:false,MountPath:/hostroot,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-conf-dir,ReadOnly:false,MountPath:/etc/cni/multus/net.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-daemon-config,ReadOnly:true,MountPath:/etc/cni/net.d/multus.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-run-multus-certs,ReadOnly:false,MountPath:/etc/cni/multus/certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:etc-kubernetes,ReadOnly:false,MountPath:/etc/kubernetes,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-szqtp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod multus-4rv9b_openshift-multus(051f7e1c-2d47-4be9-bbd5-14feec16eb16): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars
Nov 28 09:57:53 crc kubenswrapper[4838]: > logger="UnhandledError"
Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.943535 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-multus/multus-4rv9b" podUID="051f7e1c-2d47-4be9-bbd5-14feec16eb16"
Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.950292 4838 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Nov 28 09:57:53 crc kubenswrapper[4838]: container &Container{Name:dns-node-resolver,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,Command:[/bin/bash -c #!/bin/bash
Nov 28 09:57:53 crc kubenswrapper[4838]: set -uo pipefail
Nov 28 09:57:53 crc kubenswrapper[4838]:
Nov 28 09:57:53 crc kubenswrapper[4838]: trap 'jobs -p | xargs kill || true; wait; exit 0' TERM
Nov 28 09:57:53 crc kubenswrapper[4838]:
Nov 28 09:57:53 crc kubenswrapper[4838]: OPENSHIFT_MARKER="openshift-generated-node-resolver"
Nov 28 09:57:53 crc kubenswrapper[4838]: HOSTS_FILE="/etc/hosts"
Nov 28 09:57:53 crc kubenswrapper[4838]: TEMP_FILE="/etc/hosts.tmp"
Nov 28 09:57:53 crc kubenswrapper[4838]:
Nov 28 09:57:53 crc kubenswrapper[4838]: IFS=', ' read -r -a services <<< "${SERVICES}"
Nov 28 09:57:53 crc kubenswrapper[4838]:
Nov 28 09:57:53 crc kubenswrapper[4838]: # Make a temporary file with the old hosts file's attributes.
Nov 28 09:57:53 crc kubenswrapper[4838]: if ! cp -f --attributes-only "${HOSTS_FILE}" "${TEMP_FILE}"; then
Nov 28 09:57:53 crc kubenswrapper[4838]: echo "Failed to preserve hosts file. Exiting."
Nov 28 09:57:53 crc kubenswrapper[4838]: exit 1
Nov 28 09:57:53 crc kubenswrapper[4838]: fi
Nov 28 09:57:53 crc kubenswrapper[4838]:
Nov 28 09:57:53 crc kubenswrapper[4838]: while true; do
Nov 28 09:57:53 crc kubenswrapper[4838]: declare -A svc_ips
Nov 28 09:57:53 crc kubenswrapper[4838]: for svc in "${services[@]}"; do
Nov 28 09:57:53 crc kubenswrapper[4838]: # Fetch service IP from cluster dns if present. We make several tries
Nov 28 09:57:53 crc kubenswrapper[4838]: # to do it: IPv4, IPv6, IPv4 over TCP and IPv6 over TCP. The two last ones
Nov 28 09:57:53 crc kubenswrapper[4838]: # are for deployments with Kuryr on older OpenStack (OSP13) - those do not
Nov 28 09:57:53 crc kubenswrapper[4838]: # support UDP loadbalancers and require reaching DNS through TCP.
Nov 28 09:57:53 crc kubenswrapper[4838]: cmds=('dig -t A @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"'
Nov 28 09:57:53 crc kubenswrapper[4838]: 'dig -t AAAA @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"'
Nov 28 09:57:53 crc kubenswrapper[4838]: 'dig -t A +tcp +retry=0 @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"'
Nov 28 09:57:53 crc kubenswrapper[4838]: 'dig -t AAAA +tcp +retry=0 @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"')
Nov 28 09:57:53 crc kubenswrapper[4838]: for i in ${!cmds[*]}
Nov 28 09:57:53 crc kubenswrapper[4838]: do
Nov 28 09:57:53 crc kubenswrapper[4838]: ips=($(eval "${cmds[i]}"))
Nov 28 09:57:53 crc kubenswrapper[4838]: if [[ "$?" -eq 0 && "${#ips[@]}" -ne 0 ]]; then
Nov 28 09:57:53 crc kubenswrapper[4838]: svc_ips["${svc}"]="${ips[@]}"
Nov 28 09:57:53 crc kubenswrapper[4838]: break
Nov 28 09:57:53 crc kubenswrapper[4838]: fi
Nov 28 09:57:53 crc kubenswrapper[4838]: done
Nov 28 09:57:53 crc kubenswrapper[4838]: done
Nov 28 09:57:53 crc kubenswrapper[4838]:
Nov 28 09:57:53 crc kubenswrapper[4838]: # Update /etc/hosts only if we get valid service IPs
Nov 28 09:57:53 crc kubenswrapper[4838]: # We will not update /etc/hosts when there is coredns service outage or api unavailability
Nov 28 09:57:53 crc kubenswrapper[4838]: # Stale entries could exist in /etc/hosts if the service is deleted
Nov 28 09:57:53 crc kubenswrapper[4838]: if [[ -n "${svc_ips[*]-}" ]]; then
Nov 28 09:57:53 crc kubenswrapper[4838]: # Build a new hosts file from /etc/hosts with our custom entries filtered out
Nov 28 09:57:53 crc kubenswrapper[4838]: if ! sed --silent "/# ${OPENSHIFT_MARKER}/d; w ${TEMP_FILE}" "${HOSTS_FILE}"; then
Nov 28 09:57:53 crc kubenswrapper[4838]: # Only continue rebuilding the hosts entries if its original content is preserved
Nov 28 09:57:53 crc kubenswrapper[4838]: sleep 60 & wait
Nov 28 09:57:53 crc kubenswrapper[4838]: continue
Nov 28 09:57:53 crc kubenswrapper[4838]: fi
Nov 28 09:57:53 crc kubenswrapper[4838]:
Nov 28 09:57:53 crc kubenswrapper[4838]: # Append resolver entries for services
Nov 28 09:57:53 crc kubenswrapper[4838]: rc=0
Nov 28 09:57:53 crc kubenswrapper[4838]: for svc in "${!svc_ips[@]}"; do
Nov 28 09:57:53 crc kubenswrapper[4838]: for ip in ${svc_ips[${svc}]}; do
Nov 28 09:57:53 crc kubenswrapper[4838]: echo "${ip} ${svc} ${svc}.${CLUSTER_DOMAIN} # ${OPENSHIFT_MARKER}" >> "${TEMP_FILE}" || rc=$?
Nov 28 09:57:53 crc kubenswrapper[4838]: done
Nov 28 09:57:53 crc kubenswrapper[4838]: done
Nov 28 09:57:53 crc kubenswrapper[4838]: if [[ $rc -ne 0 ]]; then
Nov 28 09:57:53 crc kubenswrapper[4838]: sleep 60 & wait
Nov 28 09:57:53 crc kubenswrapper[4838]: continue
Nov 28 09:57:53 crc kubenswrapper[4838]: fi
Nov 28 09:57:53 crc kubenswrapper[4838]:
Nov 28 09:57:53 crc kubenswrapper[4838]:
Nov 28 09:57:53 crc kubenswrapper[4838]: # TODO: Update /etc/hosts atomically to avoid any inconsistent behavior
Nov 28 09:57:53 crc kubenswrapper[4838]: # Replace /etc/hosts with our modified version if needed
Nov 28 09:57:53 crc kubenswrapper[4838]: cmp "${TEMP_FILE}" "${HOSTS_FILE}" || cp -f "${TEMP_FILE}" "${HOSTS_FILE}"
Nov 28 09:57:53 crc kubenswrapper[4838]: # TEMP_FILE is not removed to avoid file create/delete and attributes copy churn
Nov 28 09:57:53 crc kubenswrapper[4838]: fi
Nov 28 09:57:53 crc kubenswrapper[4838]: sleep 60 & wait
Nov 28 09:57:53 crc kubenswrapper[4838]: unset svc_ips
Nov 28 09:57:53 crc kubenswrapper[4838]: done
Nov 28 09:57:53 crc kubenswrapper[4838]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:SERVICES,Value:image-registry.openshift-image-registry.svc,ValueFrom:nil,},EnvVar{Name:NAMESERVER,Value:10.217.4.10,ValueFrom:nil,},EnvVar{Name:CLUSTER_DOMAIN,Value:cluster.local,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{22020096 0} {} 21Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:hosts-file,ReadOnly:false,MountPath:/etc/hosts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lpt6k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod node-resolver-sft2b_openshift-dns(200cdb91-cc86-40be-a5b6-30f7b9beba6d): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars
Nov 28 09:57:53 crc kubenswrapper[4838]: > logger="UnhandledError"
Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.952881 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dns-node-resolver\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-dns/node-resolver-sft2b" podUID="200cdb91-cc86-40be-a5b6-30f7b9beba6d"
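The dns-node-resolver loop above probes cluster DNS with four dig variants (UDP and TCP, A and AAAA) before touching /etc/hosts. With the values from this container's environment (SERVICES=image-registry.openshift-image-registry.svc, NAMESERVER=10.217.4.10, CLUSTER_DOMAIN=cluster.local) the probe can be replayed by hand; a sketch, assuming dig is available on the node:

svc=image-registry.openshift-image-registry.svc
# UDP lookup, exactly as the script's first candidate command:
dig -t A @10.217.4.10 +short "${svc}.cluster.local" | grep -v "^;"
# TCP fallback, which the script keeps for load balancers without UDP support:
dig -t A +tcp +retry=0 @10.217.4.10 +short "${svc}.cluster.local" | grep -v "^;"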
Nov 28 09:57:53 crc kubenswrapper[4838]: W1128 09:57:53.965858 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5c3daa53_8c4e_4e30_aeba_146602dd45cd.slice/crio-c54de276a68a3140bdcfa9ffd5016b85d3264f102ec87ab93377c9e13c38218b WatchSource:0}: Error finding container c54de276a68a3140bdcfa9ffd5016b85d3264f102ec87ab93377c9e13c38218b: Status 404 returned error can't find the container with id c54de276a68a3140bdcfa9ffd5016b85d3264f102ec87ab93377c9e13c38218b
Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.966123 4838 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Nov 28 09:57:53 crc kubenswrapper[4838]: init container &Container{Name:kubecfg-setup,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c cat << EOF > /etc/ovn/kubeconfig
Nov 28 09:57:53 crc kubenswrapper[4838]: apiVersion: v1
Nov 28 09:57:53 crc kubenswrapper[4838]: clusters:
Nov 28 09:57:53 crc kubenswrapper[4838]: - cluster:
Nov 28 09:57:53 crc kubenswrapper[4838]: certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
Nov 28 09:57:53 crc kubenswrapper[4838]: server: https://api-int.crc.testing:6443
Nov 28 09:57:53 crc kubenswrapper[4838]: name: default-cluster
Nov 28 09:57:53 crc kubenswrapper[4838]: contexts:
Nov 28 09:57:53 crc kubenswrapper[4838]: - context:
Nov 28 09:57:53 crc kubenswrapper[4838]: cluster: default-cluster
Nov 28 09:57:53 crc kubenswrapper[4838]: namespace: default
Nov 28 09:57:53 crc kubenswrapper[4838]: user: default-auth
Nov 28 09:57:53 crc kubenswrapper[4838]: name: default-context
Nov 28 09:57:53 crc kubenswrapper[4838]: current-context: default-context
Nov 28 09:57:53 crc kubenswrapper[4838]: kind: Config
Nov 28 09:57:53 crc kubenswrapper[4838]: preferences: {}
Nov 28 09:57:53 crc kubenswrapper[4838]: users:
Nov 28 09:57:53 crc kubenswrapper[4838]: - name: default-auth
Nov 28 09:57:53 crc kubenswrapper[4838]: user:
Nov 28 09:57:53 crc kubenswrapper[4838]: client-certificate: /etc/ovn/ovnkube-node-certs/ovnkube-client-current.pem
Nov 28 09:57:53 crc kubenswrapper[4838]: client-key: /etc/ovn/ovnkube-node-certs/ovnkube-client-current.pem
Nov 28 09:57:53 crc kubenswrapper[4838]: EOF
Nov 28 09:57:53 crc kubenswrapper[4838]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-openvswitch,ReadOnly:false,MountPath:/etc/ovn/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-svlft,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovnkube-node-gmhsj_openshift-ovn-kubernetes(41b01f7d-5c75-49de-86f7-87e04bf71194): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars
Nov 28 09:57:53 crc kubenswrapper[4838]: > logger="UnhandledError"
Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.969433 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kubecfg-setup\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194"
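The kubecfg-setup init container only writes the /etc/ovn/kubeconfig shown in the heredoc; the client certificate it points at is issued and rotated separately into /etc/ovn/ovnkube-node-certs/. Once both exist, the generated file can be sanity-checked from the node; a sketch, assuming the client certificate has already been approved:

# Does the generated kubeconfig authenticate against api-int.crc.testing:6443?
oc --kubeconfig /etc/ovn/kubeconfig get node crc
# Inspect the rotated client certificate the kubeconfig references:
openssl x509 -in /etc/ovn/ovnkube-node-certs/ovnkube-client-current.pem -noout -subject -enddate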
Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.971193 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:machine-config-daemon,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a,Command:[/usr/bin/machine-config-daemon],Args:[start --payload-version=4.18.1],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:health,HostPort:8798,ContainerPort:8798,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:rootfs,ReadOnly:false,MountPath:/rootfs,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9dmrk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/health,Port:{0 8798 },Host:127.0.0.1,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:120,TimeoutSeconds:1,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.972906 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.972956 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.972972 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.972992 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:57:53 crc kubenswrapper[4838]: I1128 09:57:53.973008 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:53Z","lastTransitionTime":"2025-11-28T09:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.974486 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09,Command:[],Args:[--secure-listen-address=0.0.0.0:9001 --config-file=/etc/kube-rbac-proxy/config-file.yaml --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 --tls-min-version=VersionTLS12 --upstream=http://127.0.0.1:8797 --logtostderr=true --tls-cert-file=/etc/tls/private/tls.crt --tls-private-key-file=/etc/tls/private/tls.key],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:9001,ContainerPort:9001,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:proxy-tls,ReadOnly:false,MountPath:/etc/tls/private,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:mcd-auth-proxy-config,ReadOnly:false,MountPath:/etc/kube-rbac-proxy,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9dmrk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError"
Nov 28 09:57:53 crc kubenswrapper[4838]: E1128 09:57:53.975623 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"machine-config-daemon\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"]" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.087259 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.087318 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.087339 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.087366 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.087383 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:54Z","lastTransitionTime":"2025-11-28T09:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.190838 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.190890 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.190904 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.190926 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.190940 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:54Z","lastTransitionTime":"2025-11-28T09:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
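The repeated NodeNotReady condition is scoped to networking: the kubelet finds no CNI config in /etc/kubernetes/cni/net.d/ because the multus and ovnkube pods that would write one are themselves stuck on the envvar error above. Readiness returns once a config file lands in that directory; a quick check from the node, using the exact path the kubelet reports:

# Empty until the network pods start; a config appearing here clears NetworkReady=false:
ls -l /etc/kubernetes/cni/net.d/
oc get pods -n openshift-multus -o wide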
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.224773 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.224912 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.224942 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.224963 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.225009 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.225112 4838 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.225197 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 09:57:55.225177887 +0000 UTC m=+46.924152067 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.225242 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.225293 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.225315 4838 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.225242 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.225395 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 09:57:55.225369302 +0000 UTC m=+46.924343542 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
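An "object ... not registered" failure is different from a missing object: it indicates the kubelet has not yet registered watches for the ConfigMaps and Secrets this pod references (the same post-restart window as the service-cache errors above), so volume setup backs off and retries, here after one second as durationBeforeRetry shows. Confirming the objects exist on the API side, with names taken from these errors:

oc -n openshift-network-diagnostics get configmap kube-root-ca.crt openshift-service-ca.crt
oc -n openshift-network-console get configmap networking-console-plugin
oc -n openshift-network-console get secret networking-console-plugin-cert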
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.225407 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.225427 4838 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.225487 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 09:57:55.225468284 +0000 UTC m=+46.924442484 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.225119 4838 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.225564 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 09:57:55.225549766 +0000 UTC m=+46.924524086 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.225774 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 09:57:55.225753201 +0000 UTC m=+46.924727381 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
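The TearDown failure above is the storage-side variant of the same startup race: the kubevirt.io.hostpath-provisioner node plugin has not yet re-registered with the kubelet, so the CSI client lookup fails. Registration happens through a socket the driver drops into the kubelet's plugin registry; a sketch for checking it, assuming the default kubelet paths:

# Driver registration sockets (the hostpath provisioner's should reappear here):
ls /var/lib/kubelet/plugins_registry/
# Drivers the kubelet has actually registered for this node:
oc get csinode crc -o jsonpath='{.spec.drivers[*].name}'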
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.294237 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.294834 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.295057 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.295287 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.295584 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:54Z","lastTransitionTime":"2025-11-28T09:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.400137 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.401854 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.401998 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.402138 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.402288 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:54Z","lastTransitionTime":"2025-11-28T09:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.492710 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-tj8hl"]
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.498634 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-tj8hl"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.502137 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.502210 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.502544 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.502807 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.505145 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.505177 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.505506 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.505541 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.505555 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:54Z","lastTransitionTime":"2025-11-28T09:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.510946 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.519117 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.528845 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.539522 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.549859 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.560778 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.564805 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.565303 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 
09:57:54.566095 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.566732 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.567254 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.567703 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.568277 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.568796 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.569361 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.569874 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.569972 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.570340 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.570971 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.571426 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.571946 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.572456 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.574337 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.575384 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.575973 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.576845 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" 
path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.577632 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.578290 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.579231 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.579896 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.580885 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.581481 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.582384 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.583824 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.585651 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.586557 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.587409 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.588179 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.588923 4838 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.589069 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.591138 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.591927 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.592644 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.594642 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 28 09:57:54 crc 
kubenswrapper[4838]: I1128 09:57:54.596027 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"
2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.598754 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.599468 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.601000 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.601931 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.603090 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.603934 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.605306 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.606760 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.607675 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.607892 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.607949 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.607965 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.607989 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.608007 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:54Z","lastTransitionTime":"2025-11-28T09:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.608154 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection 
refused" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.609024 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.609734 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.611273 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.611935 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.612584 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.613805 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.614870 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.616030 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.616513 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.617512 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline 
exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.629161 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/cbb3c60a-bf9f-4a62-9310-30898e42be4f-serviceca\") pod \"node-ca-tj8hl\" (UID: \"cbb3c60a-bf9f-4a62-9310-30898e42be4f\") " pod="openshift-image-registry/node-ca-tj8hl" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.629262 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/cbb3c60a-bf9f-4a62-9310-30898e42be4f-host\") pod \"node-ca-tj8hl\" (UID: \"cbb3c60a-bf9f-4a62-9310-30898e42be4f\") " pod="openshift-image-registry/node-ca-tj8hl" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.629424 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2jkq\" (UniqueName: \"kubernetes.io/projected/cbb3c60a-bf9f-4a62-9310-30898e42be4f-kube-api-access-c2jkq\") pod \"node-ca-tj8hl\" (UID: \"cbb3c60a-bf9f-4a62-9310-30898e42be4f\") " pod="openshift-image-registry/node-ca-tj8hl" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.630017 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.645389 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.711693 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.711787 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.711804 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.711879 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.711898 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:54Z","lastTransitionTime":"2025-11-28T09:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.730450 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/cbb3c60a-bf9f-4a62-9310-30898e42be4f-serviceca\") pod \"node-ca-tj8hl\" (UID: \"cbb3c60a-bf9f-4a62-9310-30898e42be4f\") " pod="openshift-image-registry/node-ca-tj8hl" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.730533 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/cbb3c60a-bf9f-4a62-9310-30898e42be4f-host\") pod \"node-ca-tj8hl\" (UID: \"cbb3c60a-bf9f-4a62-9310-30898e42be4f\") " pod="openshift-image-registry/node-ca-tj8hl" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.730597 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2jkq\" (UniqueName: \"kubernetes.io/projected/cbb3c60a-bf9f-4a62-9310-30898e42be4f-kube-api-access-c2jkq\") pod \"node-ca-tj8hl\" (UID: \"cbb3c60a-bf9f-4a62-9310-30898e42be4f\") " pod="openshift-image-registry/node-ca-tj8hl" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.730764 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/cbb3c60a-bf9f-4a62-9310-30898e42be4f-host\") pod \"node-ca-tj8hl\" (UID: \"cbb3c60a-bf9f-4a62-9310-30898e42be4f\") " pod="openshift-image-registry/node-ca-tj8hl" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.733091 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/cbb3c60a-bf9f-4a62-9310-30898e42be4f-serviceca\") pod \"node-ca-tj8hl\" (UID: \"cbb3c60a-bf9f-4a62-9310-30898e42be4f\") " pod="openshift-image-registry/node-ca-tj8hl" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.752110 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2jkq\" (UniqueName: \"kubernetes.io/projected/cbb3c60a-bf9f-4a62-9310-30898e42be4f-kube-api-access-c2jkq\") pod \"node-ca-tj8hl\" (UID: \"cbb3c60a-bf9f-4a62-9310-30898e42be4f\") " pod="openshift-image-registry/node-ca-tj8hl" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.814825 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.814916 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.814934 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.814959 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeNotReady" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.814978 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:54Z","lastTransitionTime":"2025-11-28T09:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.815798 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-tj8hl" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.828891 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"d508fda60d2e79736db1f6ae125183cc098bc6f6a77000f8ccd8b629a31ed093"} Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.830065 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerStarted","Data":"427d9dfdc8a9866defe7ab2b5b761ead46fe46d3b13b79c04d4ee5af6525116a"} Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.831714 4838 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 28 09:57:54 crc kubenswrapper[4838]: container &Container{Name:webhook,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c set -xe Nov 28 09:57:54 crc kubenswrapper[4838]: if [[ -f "/env/_master" ]]; then Nov 28 09:57:54 crc kubenswrapper[4838]: set -o allexport Nov 28 09:57:54 crc kubenswrapper[4838]: source "/env/_master" Nov 28 09:57:54 crc kubenswrapper[4838]: set +o allexport Nov 28 09:57:54 crc kubenswrapper[4838]: fi Nov 28 09:57:54 crc kubenswrapper[4838]: # OVN-K will try to remove hybrid overlay node annotations even when the hybrid overlay is not enabled. 
Nov 28 09:57:54 crc kubenswrapper[4838]: # https://github.com/ovn-org/ovn-kubernetes/blob/ac6820df0b338a246f10f412cd5ec903bd234694/go-controller/pkg/ovn/master.go#L791
Nov 28 09:57:54 crc kubenswrapper[4838]: ho_enable="--enable-hybrid-overlay"
Nov 28 09:57:54 crc kubenswrapper[4838]: echo "I$(date "+%m%d %H:%M:%S.%N") - network-node-identity - start webhook"
Nov 28 09:57:54 crc kubenswrapper[4838]: # extra-allowed-user: service account `ovn-kubernetes-control-plane`
Nov 28 09:57:54 crc kubenswrapper[4838]: # sets pod annotations in multi-homing layer3 network controller (cluster-manager)
Nov 28 09:57:54 crc kubenswrapper[4838]: exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 \
Nov 28 09:57:54 crc kubenswrapper[4838]: --webhook-cert-dir="/etc/webhook-cert" \
Nov 28 09:57:54 crc kubenswrapper[4838]: --webhook-host=127.0.0.1 \
Nov 28 09:57:54 crc kubenswrapper[4838]: --webhook-port=9743 \
Nov 28 09:57:54 crc kubenswrapper[4838]: ${ho_enable} \
Nov 28 09:57:54 crc kubenswrapper[4838]: --enable-interconnect \
Nov 28 09:57:54 crc kubenswrapper[4838]: --disable-approver \
Nov 28 09:57:54 crc kubenswrapper[4838]: --extra-allowed-user="system:serviceaccount:openshift-ovn-kubernetes:ovn-kubernetes-control-plane" \
Nov 28 09:57:54 crc kubenswrapper[4838]: --wait-for-kubernetes-api=200s \
Nov 28 09:57:54 crc kubenswrapper[4838]: --pod-admission-conditions="/var/run/ovnkube-identity-config/additional-pod-admission-cond.json" \
Nov 28 09:57:54 crc kubenswrapper[4838]: --loglevel="${LOGLEVEL}"
Nov 28 09:57:54 crc kubenswrapper[4838]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LOGLEVEL,Value:2,ValueFrom:nil,},EnvVar{Name:KUBERNETES_NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:webhook-cert,ReadOnly:false,MountPath:/etc/webhook-cert/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:env-overrides,ReadOnly:false,MountPath:/env,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovnkube-identity-cm,ReadOnly:false,MountPath:/var/run/ovnkube-identity-config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s2kz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000470000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-node-identity-vrzqb_openshift-network-node-identity(ef543e1b-8068-4ea3-b32a-61027b32e95d): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars
Nov 28 09:57:54 crc kubenswrapper[4838]: > logger="UnhandledError"
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.832973 4838 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Nov 28 09:57:54 crc kubenswrapper[4838]: init container &Container{Name:kubecfg-setup,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c cat << EOF > /etc/ovn/kubeconfig
Nov 28 09:57:54 crc kubenswrapper[4838]: apiVersion: v1
Nov 28 09:57:54 crc kubenswrapper[4838]: clusters:
Nov 28 09:57:54 crc kubenswrapper[4838]: - cluster:
Nov 28 09:57:54 crc kubenswrapper[4838]: certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
Nov 28 09:57:54 crc kubenswrapper[4838]: server: https://api-int.crc.testing:6443
Nov 28 09:57:54 crc kubenswrapper[4838]: name: default-cluster
Nov 28 09:57:54 crc kubenswrapper[4838]: contexts:
Nov 28 09:57:54 crc kubenswrapper[4838]: - context:
Nov 28 09:57:54 crc kubenswrapper[4838]: cluster: default-cluster
Nov 28 09:57:54 crc kubenswrapper[4838]: namespace: default
Nov 28 09:57:54 crc kubenswrapper[4838]: user: default-auth
Nov 28 09:57:54 crc kubenswrapper[4838]: name: default-context
Nov 28 09:57:54 crc kubenswrapper[4838]: current-context: default-context
Nov 28 09:57:54 crc kubenswrapper[4838]: kind: Config
Nov 28 09:57:54 crc kubenswrapper[4838]: preferences: {}
Nov 28 09:57:54 crc kubenswrapper[4838]: users:
Nov 28 09:57:54 crc kubenswrapper[4838]: - name: default-auth
Nov 28 09:57:54 crc kubenswrapper[4838]: user:
Nov 28 09:57:54 crc kubenswrapper[4838]: client-certificate: /etc/ovn/ovnkube-node-certs/ovnkube-client-current.pem
Nov 28 09:57:54 crc kubenswrapper[4838]: client-key: /etc/ovn/ovnkube-node-certs/ovnkube-client-current.pem
Nov 28 09:57:54 crc kubenswrapper[4838]: EOF
Nov 28 09:57:54 crc kubenswrapper[4838]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-openvswitch,ReadOnly:false,MountPath:/etc/ovn/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-svlft,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovnkube-node-gmhsj_openshift-ovn-kubernetes(41b01f7d-5c75-49de-86f7-87e04bf71194): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars
Nov 28 09:57:54 crc kubenswrapper[4838]: > logger="UnhandledError"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.833336 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4rv9b" event={"ID":"051f7e1c-2d47-4be9-bbd5-14feec16eb16","Type":"ContainerStarted","Data":"00a570b7f457c3e2d9db317ce9eb85785dce58f444ad4a4d33782f9f942666a0"}
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.834486 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kubecfg-setup\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.834930 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" event={"ID":"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d","Type":"ContainerStarted","Data":"0fb1281d4b0e75ef23d7fc84b5fec38df3e6805b922cfc48728bc1242c7785ac"}
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.835938 4838 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Nov 28 09:57:54 crc kubenswrapper[4838]: container &Container{Name:kube-multus,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26,Command:[/bin/bash -ec --],Args:[MULTUS_DAEMON_OPT=""
Nov 28 09:57:54 crc kubenswrapper[4838]: /entrypoint/cnibincopy.sh; exec /usr/src/multus-cni/bin/multus-daemon $MULTUS_DAEMON_OPT
Nov 28 09:57:54 crc kubenswrapper[4838]: ],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:RHEL8_SOURCE_DIRECTORY,Value:/usr/src/multus-cni/rhel8/bin/,ValueFrom:nil,},EnvVar{Name:RHEL9_SOURCE_DIRECTORY,Value:/usr/src/multus-cni/rhel9/bin/,ValueFrom:nil,},EnvVar{Name:DEFAULT_SOURCE_DIRECTORY,Value:/usr/src/multus-cni/bin/,ValueFrom:nil,},EnvVar{Name:KUBERNETES_SERVICE_PORT,Value:6443,ValueFrom:nil,},EnvVar{Name:KUBERNETES_SERVICE_HOST,Value:api-int.crc.testing,ValueFrom:nil,},EnvVar{Name:MULTUS_NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:K8S_NODE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{68157440 0} {} 65Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cni-binary-copy,ReadOnly:false,MountPath:/entrypoint,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:os-release,ReadOnly:false,MountPath:/host/etc/os-release,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:system-cni-dir,ReadOnly:false,MountPath:/host/etc/cni/net.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-cni-dir,ReadOnly:false,MountPath:/host/run/multus/cni/net.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cnibin,ReadOnly:false,MountPath:/host/opt/cni/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-socket-dir-parent,ReadOnly:false,MountPath:/host/run/multus,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-run-k8s-cni-cncf-io,ReadOnly:false,MountPath:/run/k8s.cni.cncf.io,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-run-netns,ReadOnly:false,MountPath:/run/netns,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-var-lib-cni-bin,ReadOnly:false,MountPath:/var/lib/cni/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-var-lib-cni-multus,ReadOnly:false,MountPath:/var/lib/cni/multus,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-var-lib-kubelet,ReadOnly:false,MountPath:/var/lib/kubelet,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:hostroot,ReadOnly:false,MountPath:/hostroot,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-conf-dir,ReadOnly:false,MountPath:/etc/cni/multus/net.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-daemon-config,ReadOnly:true,MountPath:/etc/cni/net.d/multus.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-run-multus-certs,ReadOnly:false,MountPath:/etc/cni/multus/certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:etc-kubernetes,ReadOnly:false,MountPath:/etc/kubernetes,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-szqtp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod multus-4rv9b_openshift-multus(051f7e1c-2d47-4be9-bbd5-14feec16eb16): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars
Nov 28 09:57:54 crc kubenswrapper[4838]: > logger="UnhandledError"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.836492 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"7fbae0872f24d3d63fb64f6ae19fb61a045c927cc3dcb5cc6bd2f95c17302372"}
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.837074 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-multus/multus-4rv9b" podUID="051f7e1c-2d47-4be9-bbd5-14feec16eb16"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.837489 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"8a90004baf1ccb8ee09cdf2ba7c6651c279a8b8370461051c1d34a799dcd48bb"}
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.839144 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:egress-router-binary-copy,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c,Command:[/entrypoint/cnibincopy.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:RHEL8_SOURCE_DIRECTORY,Value:/usr/src/egress-router-cni/rhel8/bin/,ValueFrom:nil,},EnvVar{Name:RHEL9_SOURCE_DIRECTORY,Value:/usr/src/egress-router-cni/rhel9/bin/,ValueFrom:nil,},EnvVar{Name:DEFAULT_SOURCE_DIRECTORY,Value:/usr/src/egress-router-cni/bin/,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cni-binary-copy,ReadOnly:false,MountPath:/entrypoint,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cnibin,ReadOnly:false,MountPath:/host/opt/cni/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:os-release,ReadOnly:true,MountPath:/host/etc/os-release,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jgs5r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod multus-additional-cni-plugins-58mh7_openshift-multus(3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError"
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.839459 4838 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Nov 28 09:57:54 crc kubenswrapper[4838]: container &Container{Name:network-operator,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,Command:[/bin/bash -c #!/bin/bash
Nov 28 09:57:54 crc kubenswrapper[4838]: set -o allexport
Nov 28 09:57:54 crc kubenswrapper[4838]: if [[ -f /etc/kubernetes/apiserver-url.env ]]; then
Nov 28 09:57:54 crc kubenswrapper[4838]: source /etc/kubernetes/apiserver-url.env
Nov 28 09:57:54 crc kubenswrapper[4838]: else
Nov 28 09:57:54 crc kubenswrapper[4838]: echo "Error: /etc/kubernetes/apiserver-url.env is missing"
Nov 28 09:57:54 crc kubenswrapper[4838]: exit 1
Nov 28 09:57:54 crc kubenswrapper[4838]: fi
Nov 28 09:57:54 crc kubenswrapper[4838]: exec /usr/bin/cluster-network-operator start --listen=0.0.0.0:9104
Nov 28 09:57:54 crc kubenswrapper[4838]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:cno,HostPort:9104,ContainerPort:9104,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:RELEASE_VERSION,Value:4.18.1,ValueFrom:nil,},EnvVar{Name:KUBE_PROXY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b97554198294bf544fbc116c94a0a1fb2ec8a4de0e926bf9d9e320135f0bee6f,ValueFrom:nil,},EnvVar{Name:KUBE_RBAC_PROXY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09,ValueFrom:nil,},EnvVar{Name:MULTUS_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26,ValueFrom:nil,},EnvVar{Name:MULTUS_ADMISSION_CONTROLLER_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317,ValueFrom:nil,},EnvVar{Name:CNI_PLUGINS_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc,ValueFrom:nil,},EnvVar{Name:BOND_CNI_PLUGIN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78,ValueFrom:nil,},EnvVar{Name:WHEREABOUTS_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4,ValueFrom:nil,},EnvVar{Name:ROUTE_OVERRRIDE_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa,ValueFrom:nil,},EnvVar{Name:MULTUS_NETWORKPOLICY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:23f833d3738d68706eb2f2868bd76bd71cee016cffa6faf5f045a60cc8c6eddd,ValueFrom:nil,},EnvVar{Name:OVN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,ValueFrom:nil,},EnvVar{Name:OVN_NB_RAFT_ELECTION_TIMER,Value:10,ValueFrom:nil,},EnvVar{Name:OVN_SB_RAFT_ELECTION_TIMER,Value:16,ValueFrom:nil,},EnvVar{Name:OVN_NORTHD_PROBE_INTERVAL,Value:10000,ValueFrom:nil,},EnvVar{Name:OVN_CONTROLLER_INACTIVITY_PROBE,Value:180000,ValueFrom:nil,},EnvVar{Name:OVN_NB_INACTIVITY_PROBE,Value:60000,ValueFrom:nil,},EnvVar{Name:EGRESS_ROUTER_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c,ValueFrom:nil,},EnvVar{Name:NETWORK_METRICS_DAEMON_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d,ValueFrom:nil,},EnvVar{Name:NETWORK_CHECK_SOURCE_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:NETWORK_CHECK_TARGET_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:NETWORK_OPERATOR_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:CLOUD_NETWORK_CONFIG_CONTROLLER_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8048f1cb0be521f09749c0a489503cd56d85b68c6ca93380e082cfd693cd97a8,ValueFrom:nil,},EnvVar{Name:CLI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,ValueFrom:nil,},EnvVar{Name:FRR_K8S_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5dbf844e49bb46b78586930149e5e5f5dc121014c8afd10fe36f3651967cc256,ValueFrom:nil,},EnvVar{Name:NETWORKING_CONSOLE_PLUGIN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd,ValueFrom:nil,},EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:host-etc-kube,ReadOnly:true,MountPath:/etc/kubernetes,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-tls,ReadOnly:false,MountPath:/var/run/secrets/serving-cert,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rdwmf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-operator-58b4c7f79c-55gtf_openshift-network-operator(37a5e44f-9a88-4405-be8a-b645485e7312): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars
Nov 28 09:57:54 crc kubenswrapper[4838]: > logger="UnhandledError"
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.839657 4838 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Nov 28 09:57:54 crc kubenswrapper[4838]: container &Container{Name:approver,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c set -xe
Nov 28 09:57:54 crc kubenswrapper[4838]: if [[ -f "/env/_master" ]]; then
Nov 28 09:57:54 crc kubenswrapper[4838]: set -o allexport
Nov 28 09:57:54 crc kubenswrapper[4838]: source "/env/_master"
Nov 28 09:57:54 crc kubenswrapper[4838]: set +o allexport
Nov 28 09:57:54 crc kubenswrapper[4838]: fi
Nov 28 09:57:54 crc kubenswrapper[4838]: 
Nov 28 09:57:54 crc kubenswrapper[4838]: echo "I$(date "+%m%d %H:%M:%S.%N") - network-node-identity - start approver"
Nov 28 09:57:54 crc kubenswrapper[4838]: exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 \
Nov 28 09:57:54 crc kubenswrapper[4838]: --disable-webhook \
Nov 28 09:57:54 crc kubenswrapper[4838]: --csr-acceptance-conditions="/var/run/ovnkube-identity-config/additional-cert-acceptance-cond.json" \
Nov 28 09:57:54 crc kubenswrapper[4838]: --loglevel="${LOGLEVEL}"
Nov 28 09:57:54 crc kubenswrapper[4838]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LOGLEVEL,Value:4,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:env-overrides,ReadOnly:false,MountPath:/env,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovnkube-identity-cm,ReadOnly:false,MountPath:/var/run/ovnkube-identity-config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s2kz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000470000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-node-identity-vrzqb_openshift-network-node-identity(ef543e1b-8068-4ea3-b32a-61027b32e95d): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars
Nov 28 09:57:54 crc kubenswrapper[4838]: > logger="UnhandledError"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.840101 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerStarted","Data":"c54de276a68a3140bdcfa9ffd5016b85d3264f102ec87ab93377c9e13c38218b"}
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.840581 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"egress-router-binary-copy\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-multus/multus-additional-cni-plugins-58mh7" podUID="3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d"
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.840604 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"network-operator\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" podUID="37a5e44f-9a88-4405-be8a-b645485e7312"
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.841203 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"webhook\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\", failed to \"StartContainer\" for \"approver\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"]" pod="openshift-network-node-identity/network-node-identity-vrzqb" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d"
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.844686 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:machine-config-daemon,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a,Command:[/usr/bin/machine-config-daemon],Args:[start --payload-version=4.18.1],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:health,HostPort:8798,ContainerPort:8798,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:rootfs,ReadOnly:false,MountPath:/rootfs,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9dmrk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/health,Port:{0 8798 },Host:127.0.0.1,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:120,TimeoutSeconds:1,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError"
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.845494 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:iptables-alerter,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,Command:[/iptables-alerter/iptables-alerter.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONTAINER_RUNTIME_ENDPOINT,Value:unix:///run/crio/crio.sock,ValueFrom:nil,},EnvVar{Name:ALERTER_POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{68157440 0} {} 65Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:iptables-alerter-script,ReadOnly:false,MountPath:/iptables-alerter,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-slash,ReadOnly:true,MountPath:/host,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rczfb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod iptables-alerter-4ln5h_openshift-network-operator(d75a4c96-2883-4a0b-bab2-0fab2b6c0b49): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.845596 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-sft2b" event={"ID":"200cdb91-cc86-40be-a5b6-30f7b9beba6d","Type":"ContainerStarted","Data":"3da4fdddfcc4170aee286b7d330016d4a83c3b08fba2666b697bbf14081f2640"}
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.846567 4838 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Nov 28 09:57:54 crc kubenswrapper[4838]: container &Container{Name:node-ca,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f,Command:[/bin/sh -c trap 'jobs -p | xargs -r kill; echo shutting down node-ca; exit 0' TERM
Nov 28 09:57:54 crc kubenswrapper[4838]: while [ true ];
Nov 28 09:57:54 crc kubenswrapper[4838]: do
Nov 28 09:57:54 crc kubenswrapper[4838]: for f in $(ls /tmp/serviceca); do
Nov 28 09:57:54 crc kubenswrapper[4838]: echo $f
Nov 28 09:57:54 crc kubenswrapper[4838]: ca_file_path="/tmp/serviceca/${f}"
Nov 28 09:57:54 crc kubenswrapper[4838]: f=$(echo $f | sed -r 's/(.*)\.\./\1:/')
Nov 28 09:57:54 crc kubenswrapper[4838]: reg_dir_path="/etc/docker/certs.d/${f}"
Nov 28 09:57:54 crc kubenswrapper[4838]: if [ -e "${reg_dir_path}" ]; then
Nov 28 09:57:54 crc kubenswrapper[4838]: cp -u $ca_file_path $reg_dir_path/ca.crt
Nov 28 09:57:54 crc kubenswrapper[4838]: else
Nov 28 09:57:54 crc kubenswrapper[4838]: mkdir $reg_dir_path
Nov 28 09:57:54 crc kubenswrapper[4838]: cp $ca_file_path $reg_dir_path/ca.crt
Nov 28 09:57:54 crc kubenswrapper[4838]: fi
Nov 28 09:57:54 crc kubenswrapper[4838]: done
Nov 28 09:57:54 crc kubenswrapper[4838]: for d in $(ls /etc/docker/certs.d); do
Nov 28 09:57:54 crc kubenswrapper[4838]: echo $d
Nov 28 09:57:54 crc kubenswrapper[4838]: dp=$(echo $d | sed -r 's/(.*):/\1\.\./')
Nov 28 09:57:54 crc kubenswrapper[4838]: reg_conf_path="/tmp/serviceca/${dp}"
Nov 28 09:57:54 crc kubenswrapper[4838]: if [ ! -e "${reg_conf_path}" ]; then
Nov 28 09:57:54 crc kubenswrapper[4838]: rm -rf /etc/docker/certs.d/$d
Nov 28 09:57:54 crc kubenswrapper[4838]: fi
Nov 28 09:57:54 crc kubenswrapper[4838]: done
Nov 28 09:57:54 crc kubenswrapper[4838]: sleep 60 & wait ${!}
Nov 28 09:57:54 crc kubenswrapper[4838]: done
Nov 28 09:57:54 crc kubenswrapper[4838]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{10485760 0} {} 10Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:serviceca,ReadOnly:false,MountPath:/tmp/serviceca,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host,ReadOnly:false,MountPath:/etc/docker/certs.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-c2jkq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:*1001,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*0,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod node-ca-tj8hl_openshift-image-registry(cbb3c60a-bf9f-4a62-9310-30898e42be4f): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars
Nov 28 09:57:54 crc kubenswrapper[4838]: > logger="UnhandledError"
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.846615 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"iptables-alerter\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-network-operator/iptables-alerter-4ln5h" podUID="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49"
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.846770 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09,Command:[],Args:[--secure-listen-address=0.0.0.0:9001 --config-file=/etc/kube-rbac-proxy/config-file.yaml --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 --tls-min-version=VersionTLS12 --upstream=http://127.0.0.1:8797 --logtostderr=true --tls-cert-file=/etc/tls/private/tls.crt --tls-private-key-file=/etc/tls/private/tls.key],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:9001,ContainerPort:9001,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:proxy-tls,ReadOnly:false,MountPath:/etc/tls/private,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:mcd-auth-proxy-config,ReadOnly:false,MountPath:/etc/kube-rbac-proxy,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9dmrk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError"
Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.847528 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.847655 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"node-ca\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-image-registry/node-ca-tj8hl" podUID="cbb3c60a-bf9f-4a62-9310-30898e42be4f"
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.847932 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"machine-config-daemon\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"]" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.854863 4838 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Nov 28 09:57:54 crc kubenswrapper[4838]: container &Container{Name:dns-node-resolver,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,Command:[/bin/bash -c #!/bin/bash
Nov 28 09:57:54 crc kubenswrapper[4838]: set -uo pipefail
Nov 28 09:57:54 crc kubenswrapper[4838]: 
Nov 28 09:57:54 crc kubenswrapper[4838]: trap 'jobs -p | xargs kill || true; wait; exit 0' TERM
Nov 28 09:57:54 crc kubenswrapper[4838]: 
Nov 28 09:57:54 crc kubenswrapper[4838]: OPENSHIFT_MARKER="openshift-generated-node-resolver"
Nov 28 09:57:54 crc kubenswrapper[4838]: HOSTS_FILE="/etc/hosts"
Nov 28 09:57:54 crc kubenswrapper[4838]: TEMP_FILE="/etc/hosts.tmp"
Nov 28 09:57:54 crc kubenswrapper[4838]: 
Nov 28 09:57:54 crc kubenswrapper[4838]: IFS=', ' read -r -a services <<< "${SERVICES}"
Nov 28 09:57:54 crc kubenswrapper[4838]: 
Nov 28 09:57:54 crc kubenswrapper[4838]: # Make a temporary file with the old hosts file's attributes.
Nov 28 09:57:54 crc kubenswrapper[4838]: if ! cp -f --attributes-only "${HOSTS_FILE}" "${TEMP_FILE}"; then
Nov 28 09:57:54 crc kubenswrapper[4838]: echo "Failed to preserve hosts file. Exiting."
Nov 28 09:57:54 crc kubenswrapper[4838]: exit 1
Nov 28 09:57:54 crc kubenswrapper[4838]: fi
Nov 28 09:57:54 crc kubenswrapper[4838]: 
Nov 28 09:57:54 crc kubenswrapper[4838]: while true; do
Nov 28 09:57:54 crc kubenswrapper[4838]: declare -A svc_ips
Nov 28 09:57:54 crc kubenswrapper[4838]: for svc in "${services[@]}"; do
Nov 28 09:57:54 crc kubenswrapper[4838]: # Fetch service IP from cluster dns if present. We make several tries
Nov 28 09:57:54 crc kubenswrapper[4838]: # to do it: IPv4, IPv6, IPv4 over TCP and IPv6 over TCP. The two last ones
Nov 28 09:57:54 crc kubenswrapper[4838]: # are for deployments with Kuryr on older OpenStack (OSP13) - those do not
Nov 28 09:57:54 crc kubenswrapper[4838]: # support UDP loadbalancers and require reaching DNS through TCP.
Nov 28 09:57:54 crc kubenswrapper[4838]: cmds=('dig -t A @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"' Nov 28 09:57:54 crc kubenswrapper[4838]: 'dig -t AAAA @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"' Nov 28 09:57:54 crc kubenswrapper[4838]: 'dig -t A +tcp +retry=0 @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"' Nov 28 09:57:54 crc kubenswrapper[4838]: 'dig -t AAAA +tcp +retry=0 @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"') Nov 28 09:57:54 crc kubenswrapper[4838]: for i in ${!cmds[*]} Nov 28 09:57:54 crc kubenswrapper[4838]: do Nov 28 09:57:54 crc kubenswrapper[4838]: ips=($(eval "${cmds[i]}")) Nov 28 09:57:54 crc kubenswrapper[4838]: if [[ "$?" -eq 0 && "${#ips[@]}" -ne 0 ]]; then Nov 28 09:57:54 crc kubenswrapper[4838]: svc_ips["${svc}"]="${ips[@]}" Nov 28 09:57:54 crc kubenswrapper[4838]: break Nov 28 09:57:54 crc kubenswrapper[4838]: fi Nov 28 09:57:54 crc kubenswrapper[4838]: done Nov 28 09:57:54 crc kubenswrapper[4838]: done Nov 28 09:57:54 crc kubenswrapper[4838]: Nov 28 09:57:54 crc kubenswrapper[4838]: # Update /etc/hosts only if we get valid service IPs Nov 28 09:57:54 crc kubenswrapper[4838]: # We will not update /etc/hosts when there is coredns service outage or api unavailability Nov 28 09:57:54 crc kubenswrapper[4838]: # Stale entries could exist in /etc/hosts if the service is deleted Nov 28 09:57:54 crc kubenswrapper[4838]: if [[ -n "${svc_ips[*]-}" ]]; then Nov 28 09:57:54 crc kubenswrapper[4838]: # Build a new hosts file from /etc/hosts with our custom entries filtered out Nov 28 09:57:54 crc kubenswrapper[4838]: if ! sed --silent "/# ${OPENSHIFT_MARKER}/d; w ${TEMP_FILE}" "${HOSTS_FILE}"; then Nov 28 09:57:54 crc kubenswrapper[4838]: # Only continue rebuilding the hosts entries if its original content is preserved Nov 28 09:57:54 crc kubenswrapper[4838]: sleep 60 & wait Nov 28 09:57:54 crc kubenswrapper[4838]: continue Nov 28 09:57:54 crc kubenswrapper[4838]: fi Nov 28 09:57:54 crc kubenswrapper[4838]: Nov 28 09:57:54 crc kubenswrapper[4838]: # Append resolver entries for services Nov 28 09:57:54 crc kubenswrapper[4838]: rc=0 Nov 28 09:57:54 crc kubenswrapper[4838]: for svc in "${!svc_ips[@]}"; do Nov 28 09:57:54 crc kubenswrapper[4838]: for ip in ${svc_ips[${svc}]}; do Nov 28 09:57:54 crc kubenswrapper[4838]: echo "${ip} ${svc} ${svc}.${CLUSTER_DOMAIN} # ${OPENSHIFT_MARKER}" >> "${TEMP_FILE}" || rc=$? 
Nov 28 09:57:54 crc kubenswrapper[4838]: done
Nov 28 09:57:54 crc kubenswrapper[4838]: done
Nov 28 09:57:54 crc kubenswrapper[4838]: if [[ $rc -ne 0 ]]; then
Nov 28 09:57:54 crc kubenswrapper[4838]: sleep 60 & wait
Nov 28 09:57:54 crc kubenswrapper[4838]: continue
Nov 28 09:57:54 crc kubenswrapper[4838]: fi
Nov 28 09:57:54 crc kubenswrapper[4838]:
Nov 28 09:57:54 crc kubenswrapper[4838]:
Nov 28 09:57:54 crc kubenswrapper[4838]: # TODO: Update /etc/hosts atomically to avoid any inconsistent behavior
Nov 28 09:57:54 crc kubenswrapper[4838]: # Replace /etc/hosts with our modified version if needed
Nov 28 09:57:54 crc kubenswrapper[4838]: cmp "${TEMP_FILE}" "${HOSTS_FILE}" || cp -f "${TEMP_FILE}" "${HOSTS_FILE}"
Nov 28 09:57:54 crc kubenswrapper[4838]: # TEMP_FILE is not removed to avoid file create/delete and attributes copy churn
Nov 28 09:57:54 crc kubenswrapper[4838]: fi
Nov 28 09:57:54 crc kubenswrapper[4838]: sleep 60 & wait
Nov 28 09:57:54 crc kubenswrapper[4838]: unset svc_ips
Nov 28 09:57:54 crc kubenswrapper[4838]: done
Nov 28 09:57:54 crc kubenswrapper[4838]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:SERVICES,Value:image-registry.openshift-image-registry.svc,ValueFrom:nil,},EnvVar{Name:NAMESERVER,Value:10.217.4.10,ValueFrom:nil,},EnvVar{Name:CLUSTER_DOMAIN,Value:cluster.local,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{22020096 0} {} 21Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:hosts-file,ReadOnly:false,MountPath:/etc/hosts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lpt6k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod node-resolver-sft2b_openshift-dns(200cdb91-cc86-40be-a5b6-30f7b9beba6d): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars
Nov 28 09:57:54 crc kubenswrapper[4838]: > logger="UnhandledError"
Nov 28 09:57:54 crc kubenswrapper[4838]: E1128 09:57:54.857795 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dns-node-resolver\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-dns/node-resolver-sft2b" podUID="200cdb91-cc86-40be-a5b6-30f7b9beba6d"
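The dns-node-resolver entrypoint dumped in the records above is a complete loop: once a minute it resolves each name in ${SERVICES} against the cluster DNS at ${NAMESERVER} (trying A and AAAA over UDP, then over TCP for old Kuryr/OSP13 deployments), filters its own marker lines out of a temp copy of /etc/hosts, appends one marker-tagged line per resolved IP, and overwrites /etc/hosts only when cmp reports a difference. A minimal sketch of inspecting and reverting its output on the node follows; HOSTS_FILE, TEMP_FILE, the marker string, and the sed idiom are taken verbatim from the script above, while the grep step and the sample output line are illustrative assumptions, not something this log records.

#!/bin/bash
# Sketch: inspect and strip the /etc/hosts entries maintained by
# dns-node-resolver. Marker and paths mirror the script logged above.
set -uo pipefail

HOSTS_FILE="/etc/hosts"
TEMP_FILE="/etc/hosts.tmp"
OPENSHIFT_MARKER="openshift-generated-node-resolver"

# Entries written by the loop look like this (the service IP shown is
# a made-up example):
#   10.217.4.33 image-registry.openshift-image-registry.svc image-registry.openshift-image-registry.svc.cluster.local # openshift-generated-node-resolver
grep "# ${OPENSHIFT_MARKER}" "${HOSTS_FILE}" || echo "no resolver entries present"

# The same filter the script runs each cycle: copy every line except
# the marker lines into TEMP_FILE (sed -n suppresses autoprint; the w
# command writes each line that survives the d).
sed --silent "/# ${OPENSHIFT_MARKER}/d; w ${TEMP_FILE}" "${HOSTS_FILE}"

Note the cmp-before-cp step in the logged script: it rewrites /etc/hosts in place rather than renaming a new file over it, presumably because the file is bind-mounted into pods and an atomic rename would swap the inode out from under those mounts; the TODO about atomic updates is the script's own comment.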
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.883005 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.895482 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.909083 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.917582 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.917616 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.917626 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.917641 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.917652 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:54Z","lastTransitionTime":"2025-11-28T09:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.924800 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\
\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastSt
ate\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.933117 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:54 crc 
kubenswrapper[4838]: I1128 09:57:54.948982 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.963440 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.975261 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:54 crc kubenswrapper[4838]: I1128 09:57:54.991361 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"
started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.001541 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.020101 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.020652 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.020705 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.020746 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.020765 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.020779 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:55Z","lastTransitionTime":"2025-11-28T09:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.031525 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.047937 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.061435 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could 
not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.090831 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.105826 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.123373 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:55 
crc kubenswrapper[4838]: I1128 09:57:55.123455 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.123481 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.123510 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.123531 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:55Z","lastTransitionTime":"2025-11-28T09:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.124435 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries 
and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"l
astState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.147179 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.162119 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.178495 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.197214 4838 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.211536 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.223323 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.226690 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.226795 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.226816 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.226847 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.226868 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:55Z","lastTransitionTime":"2025-11-28T09:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI 
configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.233880 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.233986 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.234055 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.234099 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:57:55 crc kubenswrapper[4838]: E1128 09:57:55.234184 4838 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 09:57:55 crc kubenswrapper[4838]: E1128 09:57:55.234208 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 09:57:57.234167412 +0000 UTC m=+48.933141622 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 09:57:55 crc kubenswrapper[4838]: E1128 09:57:55.234247 4838 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 09:57:55 crc kubenswrapper[4838]: E1128 09:57:55.234261 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 09:57:57.234239524 +0000 UTC m=+48.933213724 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.234389 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:57:55 crc kubenswrapper[4838]: E1128 09:57:55.234473 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 09:57:55 crc kubenswrapper[4838]: E1128 09:57:55.234505 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 09:57:57.234461359 +0000 UTC m=+48.933435539 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 09:57:55 crc kubenswrapper[4838]: E1128 09:57:55.234508 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 09:57:55 crc kubenswrapper[4838]: E1128 09:57:55.234554 4838 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:57:55 crc kubenswrapper[4838]: E1128 09:57:55.234632 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 09:57:57.234609363 +0000 UTC m=+48.933583573 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:57:55 crc kubenswrapper[4838]: E1128 09:57:55.234708 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 09:57:55 crc kubenswrapper[4838]: E1128 09:57:55.234757 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 09:57:55 crc kubenswrapper[4838]: E1128 09:57:55.234774 4838 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:57:55 crc kubenswrapper[4838]: E1128 09:57:55.234834 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 09:57:57.234818008 +0000 UTC m=+48.933792218 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.237789 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.329886 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.329943 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.329960 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.329984 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.330000 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:55Z","lastTransitionTime":"2025-11-28T09:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.433910 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.433981 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.434005 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.434034 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.434057 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:55Z","lastTransitionTime":"2025-11-28T09:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.536943 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.536999 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.537017 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.537039 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.537056 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:55Z","lastTransitionTime":"2025-11-28T09:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.561748 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.561748 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:57:55 crc kubenswrapper[4838]: E1128 09:57:55.561891 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:57:55 crc kubenswrapper[4838]: E1128 09:57:55.562060 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.562349 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:57:55 crc kubenswrapper[4838]: E1128 09:57:55.562486 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.640018 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.640072 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.640091 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.640114 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.640131 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:55Z","lastTransitionTime":"2025-11-28T09:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.694616 4838 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.743121 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.743200 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.743223 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.743255 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.743279 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:55Z","lastTransitionTime":"2025-11-28T09:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.847359 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.848186 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.848206 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.848675 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.848701 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:55Z","lastTransitionTime":"2025-11-28T09:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.849862 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-tj8hl" event={"ID":"cbb3c60a-bf9f-4a62-9310-30898e42be4f","Type":"ContainerStarted","Data":"18e25179e8c8a6ea6f8c1ab4bc7a2eb9cbb330dd77437fe9ea3c6288c2e0f27a"} Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.870658 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.889341 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.910922 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMount
s\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.926235 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.941239 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.953702 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.953790 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.953807 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.953832 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.953849 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:55Z","lastTransitionTime":"2025-11-28T09:57:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.956798 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.974223 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline 
exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:55 crc kubenswrapper[4838]: I1128 09:57:55.990121 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.018436 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.032464 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.043702 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.055978 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.056815 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.056860 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.056878 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.056903 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.056921 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:56Z","lastTransitionTime":"2025-11-28T09:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.064391 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.160370 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.160438 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.160457 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.160485 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.160503 4838 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:56Z","lastTransitionTime":"2025-11-28T09:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.263707 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.263825 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.263843 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.263867 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.263889 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:56Z","lastTransitionTime":"2025-11-28T09:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.367140 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.367192 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.367208 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.367232 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.367248 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:56Z","lastTransitionTime":"2025-11-28T09:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.470230 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.470300 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.470320 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.470347 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.470365 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:56Z","lastTransitionTime":"2025-11-28T09:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.573527 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.573593 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.573613 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.573636 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.573657 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:56Z","lastTransitionTime":"2025-11-28T09:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.679599 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.679681 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.679698 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.679761 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.679778 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:56Z","lastTransitionTime":"2025-11-28T09:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.782404 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.782469 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.782485 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.782511 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.782532 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:56Z","lastTransitionTime":"2025-11-28T09:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.855394 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-tj8hl" event={"ID":"cbb3c60a-bf9f-4a62-9310-30898e42be4f","Type":"ContainerStarted","Data":"e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1"} Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.882182 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.886661 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.886752 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.886771 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.886796 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.886813 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:56Z","lastTransitionTime":"2025-11-28T09:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.902413 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-binco
py\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.915232 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services 
have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.932505 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.947525 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.972761 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.988410 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could 
not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.990133 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.990193 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.990213 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.990239 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:56 crc kubenswrapper[4838]: I1128 09:57:56.990256 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:56Z","lastTransitionTime":"2025-11-28T09:57:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.014916 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"image
ID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"m
ountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126
.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.026970 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.027054 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.027081 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.027107 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.027124 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:57Z","lastTransitionTime":"2025-11-28T09:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.028425 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: E1128 09:57:57.042529 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.044063 4838 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"
started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.047851 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.047920 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.047945 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.047972 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.047989 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:57Z","lastTransitionTime":"2025-11-28T09:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.059448 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: E1128 09:57:57.065041 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.070177 4838 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.070241 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.070260 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.070285 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.070301 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:57Z","lastTransitionTime":"2025-11-28T09:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.074337 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.085929 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: E1128 09:57:57.086098 4838 
kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/red
hat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987
117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba
717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.091149 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.091183 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.091193 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.091212 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.091225 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:57Z","lastTransitionTime":"2025-11-28T09:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:57 crc kubenswrapper[4838]: E1128 09:57:57.109564 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.115222 4838 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.115255 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.115269 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.115288 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.115301 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:57Z","lastTransitionTime":"2025-11-28T09:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:57 crc kubenswrapper[4838]: E1128 09:57:57.132411 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: E1128 09:57:57.132664 4838 
kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.134903 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.134939 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.134954 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.134976 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.134994 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:57Z","lastTransitionTime":"2025-11-28T09:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.237682 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.237735 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.237744 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.237757 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.237766 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:57Z","lastTransitionTime":"2025-11-28T09:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.254775 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.255012 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.255104 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:57:57 crc kubenswrapper[4838]: E1128 09:57:57.255179 4838 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 09:57:57 crc kubenswrapper[4838]: E1128 09:57:57.255178 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 09:58:01.255138277 +0000 UTC m=+52.954112487 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.255255 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:57:57 crc kubenswrapper[4838]: E1128 09:57:57.255309 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 09:57:57 crc kubenswrapper[4838]: E1128 09:57:57.255345 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 09:57:57 crc kubenswrapper[4838]: E1128 09:57:57.255368 4838 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:57:57 crc kubenswrapper[4838]: E1128 09:57:57.255451 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 09:58:01.255422204 +0000 UTC m=+52.954396444 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:57:57 crc kubenswrapper[4838]: E1128 09:57:57.255463 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 09:57:57 crc kubenswrapper[4838]: E1128 09:57:57.255483 4838 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 09:57:57 crc kubenswrapper[4838]: E1128 09:57:57.255489 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 09:57:57 crc kubenswrapper[4838]: E1128 09:57:57.255496 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-11-28 09:58:01.255478886 +0000 UTC m=+52.954453206 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 09:57:57 crc kubenswrapper[4838]: E1128 09:57:57.255510 4838 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:57:57 crc kubenswrapper[4838]: E1128 09:57:57.255528 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 09:58:01.255511317 +0000 UTC m=+52.954485607 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 09:57:57 crc kubenswrapper[4838]: E1128 09:57:57.255567 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 09:58:01.255552558 +0000 UTC m=+52.954526768 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.255319 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.340187 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.340220 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.340228 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.340241 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.340252 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:57Z","lastTransitionTime":"2025-11-28T09:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.444076 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.444177 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.444197 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.444222 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.444241 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:57Z","lastTransitionTime":"2025-11-28T09:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.548045 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.548106 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.548119 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.548139 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.548154 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:57Z","lastTransitionTime":"2025-11-28T09:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.561986 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.562134 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:57:57 crc kubenswrapper[4838]: E1128 09:57:57.562183 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.562281 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:57:57 crc kubenswrapper[4838]: E1128 09:57:57.562392 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:57:57 crc kubenswrapper[4838]: E1128 09:57:57.562431 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.651984 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.652052 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.652071 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.652095 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.652113 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:57Z","lastTransitionTime":"2025-11-28T09:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.708929 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.719479 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.723773 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.737701 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.755669 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMount
s\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.757331 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.757408 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.757434 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.757469 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.757493 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:57Z","lastTransitionTime":"2025-11-28T09:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.768038 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.782837 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.798001 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.811619 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.826877 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"
started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.840885 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.860793 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.860834 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.860851 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.860873 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.860891 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:57Z","lastTransitionTime":"2025-11-28T09:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.883645 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"image
ID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"m
ountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126
.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.898498 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T
09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.913536 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.927677 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready 
status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.964639 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.964709 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.964760 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.964785 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:57 crc kubenswrapper[4838]: I1128 09:57:57.964804 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:57Z","lastTransitionTime":"2025-11-28T09:57:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.067398 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.067648 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.067828 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.067969 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.068084 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:58Z","lastTransitionTime":"2025-11-28T09:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.171557 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.171623 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.171645 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.171672 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.171692 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:58Z","lastTransitionTime":"2025-11-28T09:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.275251 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.275583 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.275863 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.276042 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.276175 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:58Z","lastTransitionTime":"2025-11-28T09:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.379676 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.379783 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.379802 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.379827 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.379845 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:58Z","lastTransitionTime":"2025-11-28T09:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.482607 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.482688 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.482705 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.482773 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.482810 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:58Z","lastTransitionTime":"2025-11-28T09:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.583082 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.587040 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.587105 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.587123 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.587146 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.587162 4838 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:58Z","lastTransitionTime":"2025-11-28T09:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.603524 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.637052 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.660142 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.676479 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.689065 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.689104 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.689113 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.689127 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.689136 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:58Z","lastTransitionTime":"2025-11-28T09:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.690973 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.693635 4838 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.703841 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline 
exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.708455 4838 scope.go:117] "RemoveContainer" containerID="2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.708899 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.717347 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.741370 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.786043 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.791263 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:58 
crc kubenswrapper[4838]: I1128 09:57:58.791296 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.791312 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.791333 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.791348 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:58Z","lastTransitionTime":"2025-11-28T09:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.796043 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/s
tatic-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.805264 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.816389 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.824586 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.838459 4838 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.848929 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.872192 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-
dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\
":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.881823 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.893692 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[cluster-policy-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline 
exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.895030 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.895106 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.895130 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.895160 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.895183 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:58Z","lastTransitionTime":"2025-11-28T09:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.908959 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file 
\\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for 
RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.924879 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.935602 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.943264 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.951924 4838 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\
"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.961145 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.966772 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.976234 
4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.985686 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.997891 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.997953 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.997971 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.997996 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.998016 4838 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:58Z","lastTransitionTime":"2025-11-28T09:57:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:58 crc kubenswrapper[4838]: I1128 09:57:58.998405 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-binco
py\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.101577 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.101635 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.101653 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.101677 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.101694 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:59Z","lastTransitionTime":"2025-11-28T09:57:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.204834 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.205078 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.205309 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.205466 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.205585 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:59Z","lastTransitionTime":"2025-11-28T09:57:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.286173 4838 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.308979 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.309123 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.309150 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.309181 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.309204 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:59Z","lastTransitionTime":"2025-11-28T09:57:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.339042 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.366089 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.379541 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.395361 4838 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.411365 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.411413 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.411427 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.411449 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.411463 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:59Z","lastTransitionTime":"2025-11-28T09:57:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.413317 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\
":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.425776 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.441373 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.452439 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.467820 4838 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\
"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.484172 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.495551 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.511333 
4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.515616 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.515687 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.515705 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.515766 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.515785 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:59Z","lastTransitionTime":"2025-11-28T09:57:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network 
plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.527588 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.546624 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.562631 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:57:59 crc kubenswrapper[4838]: E1128 09:57:59.562925 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.563135 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:57:59 crc kubenswrapper[4838]: E1128 09:57:59.563395 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.563510 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:57:59 crc kubenswrapper[4838]: E1128 09:57:59.563600 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.565560 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.590946 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.619106 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.619156 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.619172 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.619195 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.619212 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:59Z","lastTransitionTime":"2025-11-28T09:57:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.721794 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.721843 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.721855 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.721877 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.721891 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:59Z","lastTransitionTime":"2025-11-28T09:57:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.825006 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.825051 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.825059 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.825074 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.825084 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:59Z","lastTransitionTime":"2025-11-28T09:57:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.871931 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.877809 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver/0.log" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.878426 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9"} Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.878997 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.898481 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.916779 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e63
55e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.928761 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.928823 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.928845 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.928874 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.928898 4838 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:57:59Z","lastTransitionTime":"2025-11-28T09:57:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.930662 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.956466 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.969366 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.981804 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb84
34c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:57:59 crc kubenswrapper[4838]: I1128 09:57:59.991943 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.004378 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.012440 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.022752 4838 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.032789 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.032827 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.032844 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.032867 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.032884 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:00Z","lastTransitionTime":"2025-11-28T09:58:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin 
returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.035099 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.053656 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.064663 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.073765 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.084831 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.136310 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.136359 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.136376 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.136398 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.136415 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:00Z","lastTransitionTime":"2025-11-28T09:58:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.239346 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.239398 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.239415 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.239439 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:00 crc kubenswrapper[4838]: I1128 09:58:00.239456 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:00Z","lastTransitionTime":"2025-11-28T09:58:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.272511 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.272580 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.272592 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.272619 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.272641 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:01Z","lastTransitionTime":"2025-11-28T09:58:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.297891 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.298037 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.298102 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.298148 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:01 crc kubenswrapper[4838]: E1128 09:58:01.298180 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 09:58:09.298140976 +0000 UTC m=+60.997115186 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.298248 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:01 crc kubenswrapper[4838]: E1128 09:58:01.298311 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 09:58:01 crc kubenswrapper[4838]: E1128 09:58:01.298354 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 09:58:01 crc kubenswrapper[4838]: E1128 09:58:01.298373 4838 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:58:01 crc kubenswrapper[4838]: E1128 09:58:01.298382 4838 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 09:58:01 crc kubenswrapper[4838]: E1128 09:58:01.298430 4838 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 09:58:01 crc kubenswrapper[4838]: E1128 09:58:01.298330 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 09:58:01 crc kubenswrapper[4838]: E1128 09:58:01.298475 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 09:58:01 crc kubenswrapper[4838]: E1128 09:58:01.298496 4838 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:58:01 crc kubenswrapper[4838]: E1128 09:58:01.298446 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 09:58:09.298420223 +0000 UTC m=+60.997394393 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:58:01 crc kubenswrapper[4838]: E1128 09:58:01.298574 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 09:58:09.298551217 +0000 UTC m=+60.997525427 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 09:58:01 crc kubenswrapper[4838]: E1128 09:58:01.298598 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 09:58:09.298585388 +0000 UTC m=+60.997559598 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 09:58:01 crc kubenswrapper[4838]: E1128 09:58:01.298636 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 09:58:09.298624879 +0000 UTC m=+60.997599089 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.375865 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.375911 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.375925 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.375942 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.375954 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:01Z","lastTransitionTime":"2025-11-28T09:58:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.479195 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.479262 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.479277 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.479302 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.479317 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:01Z","lastTransitionTime":"2025-11-28T09:58:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.561418 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.561551 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:01 crc kubenswrapper[4838]: E1128 09:58:01.561585 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.561556 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:01 crc kubenswrapper[4838]: E1128 09:58:01.561780 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:01 crc kubenswrapper[4838]: E1128 09:58:01.561894 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.582301 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.582342 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.582353 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.582374 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.582386 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:01Z","lastTransitionTime":"2025-11-28T09:58:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.687274 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.687335 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.687353 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.687380 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:01 crc kubenswrapper[4838]: I1128 09:58:01.687402 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:01Z","lastTransitionTime":"2025-11-28T09:58:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.446161 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.446225 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.446248 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.446279 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.446435 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:03Z","lastTransitionTime":"2025-11-28T09:58:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.550246 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.550310 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.550328 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.550357 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.550385 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:03Z","lastTransitionTime":"2025-11-28T09:58:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.562057 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.562109 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.562036 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:03 crc kubenswrapper[4838]: E1128 09:58:03.562216 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:03 crc kubenswrapper[4838]: E1128 09:58:03.562475 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:03 crc kubenswrapper[4838]: E1128 09:58:03.562654 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.587228 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp"] Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.588123 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.591588 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.591682 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.603409 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.625016 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ebace5c6-6ca4-48ff-9c50-c6b769d599b5-env-overrides\") pod \"ovnkube-control-plane-749d76644c-tnclp\" (UID: \"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.625076 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ebace5c6-6ca4-48ff-9c50-c6b769d599b5-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-tnclp\" (UID: \"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.625133 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ebace5c6-6ca4-48ff-9c50-c6b769d599b5-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-tnclp\" (UID: \"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.625282 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pxnw\" (UniqueName: \"kubernetes.io/projected/ebace5c6-6ca4-48ff-9c50-c6b769d599b5-kube-api-access-8pxnw\") pod \"ovnkube-control-plane-749d76644c-tnclp\" (UID: \"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" Nov 28 09:58:03 crc 
kubenswrapper[4838]: I1128 09:58:03.639032 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.653914 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.654015 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.654043 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.654082 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.654108 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:03Z","lastTransitionTime":"2025-11-28T09:58:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.656038 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\
":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.667588 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.688428 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.699531 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.708231 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.723007 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.725800 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ebace5c6-6ca4-48ff-9c50-c6b769d599b5-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-tnclp\" (UID: \"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.725874 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pxnw\" (UniqueName: \"kubernetes.io/projected/ebace5c6-6ca4-48ff-9c50-c6b769d599b5-kube-api-access-8pxnw\") pod \"ovnkube-control-plane-749d76644c-tnclp\" (UID: \"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.725916 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ebace5c6-6ca4-48ff-9c50-c6b769d599b5-env-overrides\") pod \"ovnkube-control-plane-749d76644c-tnclp\" (UID: \"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.725942 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ebace5c6-6ca4-48ff-9c50-c6b769d599b5-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-tnclp\" (UID: \"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.726882 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ebace5c6-6ca4-48ff-9c50-c6b769d599b5-env-overrides\") pod \"ovnkube-control-plane-749d76644c-tnclp\" (UID: \"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.726939 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ebace5c6-6ca4-48ff-9c50-c6b769d599b5-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-tnclp\" (UID: \"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.733000 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ebace5c6-6ca4-48ff-9c50-c6b769d599b5-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-tnclp\" (UID: \"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.735464 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.748853 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pxnw\" (UniqueName: \"kubernetes.io/projected/ebace5c6-6ca4-48ff-9c50-c6b769d599b5-kube-api-access-8pxnw\") pod \"ovnkube-control-plane-749d76644c-tnclp\" (UID: \"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.749399 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.756382 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.756405 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.756415 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.756428 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.756437 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:03Z","lastTransitionTime":"2025-11-28T09:58:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.774710 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.793275 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.802510 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.814150 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.825470 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.837175 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.859517 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.859598 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.859623 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.859654 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.859679 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:03Z","lastTransitionTime":"2025-11-28T09:58:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.930628 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp"
Nov 28 09:58:03 crc kubenswrapper[4838]: W1128 09:58:03.951020 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podebace5c6_6ca4_48ff_9c50_c6b769d599b5.slice/crio-188e2accd3e336be738f08f6652e7a954e991dd72f6dcb0d0684392860279458 WatchSource:0}: Error finding container 188e2accd3e336be738f08f6652e7a954e991dd72f6dcb0d0684392860279458: Status 404 returned error can't find the container with id 188e2accd3e336be738f08f6652e7a954e991dd72f6dcb0d0684392860279458
Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.964535 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.964600 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.964616 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.964641 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:03 crc kubenswrapper[4838]: I1128 09:58:03.964661 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:03Z","lastTransitionTime":"2025-11-28T09:58:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.067694 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.067748 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.067759 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.067773 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.067784 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:04Z","lastTransitionTime":"2025-11-28T09:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.172506 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.172550 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.172560 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.172577 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.172589 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:04Z","lastTransitionTime":"2025-11-28T09:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.274926 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.274974 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.274986 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.275003 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.275012 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:04Z","lastTransitionTime":"2025-11-28T09:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.378351 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.378413 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.378431 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.378458 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.378476 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:04Z","lastTransitionTime":"2025-11-28T09:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.481296 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.481350 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.481368 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.481395 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.481413 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:04Z","lastTransitionTime":"2025-11-28T09:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.584400 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.584478 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.584499 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.584535 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.584569 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:04Z","lastTransitionTime":"2025-11-28T09:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.688096 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.688167 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.688186 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.688211 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.688228 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:04Z","lastTransitionTime":"2025-11-28T09:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.790506 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.790547 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.790558 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.790575 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.790589 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:04Z","lastTransitionTime":"2025-11-28T09:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.893898 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.893965 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.893983 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.894007 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.894024 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:04Z","lastTransitionTime":"2025-11-28T09:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.896606 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" event={"ID":"ebace5c6-6ca4-48ff-9c50-c6b769d599b5","Type":"ContainerStarted","Data":"efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec"} Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.896656 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" event={"ID":"ebace5c6-6ca4-48ff-9c50-c6b769d599b5","Type":"ContainerStarted","Data":"572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c"} Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.896677 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" event={"ID":"ebace5c6-6ca4-48ff-9c50-c6b769d599b5","Type":"ContainerStarted","Data":"188e2accd3e336be738f08f6652e7a954e991dd72f6dcb0d0684392860279458"} Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.907892 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.925132 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.939962 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.949169 4838 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.965547 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e63
55e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.973952 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.987291 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.996099 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.996949 4838 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.997003 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.997022 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.997048 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:04 crc kubenswrapper[4838]: I1128 09:58:04.997065 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:04Z","lastTransitionTime":"2025-11-28T09:58:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.008078 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"ru
nning\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.025196 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-binco
py\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.032827 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services 
have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.037486 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-p69l6"]
Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.038176 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6"
Nov 28 09:58:05 crc kubenswrapper[4838]: E1128 09:58:05.038289 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447"
Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.045356 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.060542 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.071336 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.082870 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.100284 4838 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.100356 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.100379 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.100405 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.100427 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:05Z","lastTransitionTime":"2025-11-28T09:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.110685 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.126352 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.139086 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h25xx\" (UniqueName: \"kubernetes.io/projected/2a223cc8-af33-4e83-8bfc-2676c5700447-kube-api-access-h25xx\") pod \"network-metrics-daemon-p69l6\" (UID: \"2a223cc8-af33-4e83-8bfc-2676c5700447\") " pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.139218 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs\") pod \"network-metrics-daemon-p69l6\" (UID: \"2a223cc8-af33-4e83-8bfc-2676c5700447\") " pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.142705 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been 
read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.156682 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.171704 4838 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.189309 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e63
55e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.203372 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.203450 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.203476 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.203507 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.203531 4838 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:05Z","lastTransitionTime":"2025-11-28T09:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.203785 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.230006 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.240518 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs\") pod \"network-metrics-daemon-p69l6\" (UID: \"2a223cc8-af33-4e83-8bfc-2676c5700447\") " 
pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.240651 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h25xx\" (UniqueName: \"kubernetes.io/projected/2a223cc8-af33-4e83-8bfc-2676c5700447-kube-api-access-h25xx\") pod \"network-metrics-daemon-p69l6\" (UID: \"2a223cc8-af33-4e83-8bfc-2676c5700447\") " pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:05 crc kubenswrapper[4838]: E1128 09:58:05.240751 4838 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 09:58:05 crc kubenswrapper[4838]: E1128 09:58:05.240815 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs podName:2a223cc8-af33-4e83-8bfc-2676c5700447 nodeName:}" failed. No retries permitted until 2025-11-28 09:58:05.74079987 +0000 UTC m=+57.439774040 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs") pod "network-metrics-daemon-p69l6" (UID: "2a223cc8-af33-4e83-8bfc-2676c5700447") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.243590 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.256462 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.266032 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h25xx\" (UniqueName: \"kubernetes.io/projected/2a223cc8-af33-4e83-8bfc-2676c5700447-kube-api-access-h25xx\") pod \"network-metrics-daemon-p69l6\" (UID: \"2a223cc8-af33-4e83-8bfc-2676c5700447\") " pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.271493 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.285200 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.294149 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.305656 4838 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.308629 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.308682 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.308695 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.308730 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.308748 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:05Z","lastTransitionTime":"2025-11-28T09:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin 
returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.319347 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.331228 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.341461 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.350041 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.411927 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.412008 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.412025 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.412053 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.412071 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:05Z","lastTransitionTime":"2025-11-28T09:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.514296 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.514400 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.514420 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.514446 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.514462 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:05Z","lastTransitionTime":"2025-11-28T09:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.561437 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.561454 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:05 crc kubenswrapper[4838]: E1128 09:58:05.561710 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.561816 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:05 crc kubenswrapper[4838]: E1128 09:58:05.562430 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:05 crc kubenswrapper[4838]: E1128 09:58:05.562494 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.617272 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.617315 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.617327 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.617344 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.617356 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:05Z","lastTransitionTime":"2025-11-28T09:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.719869 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.719896 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.719903 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.719916 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.719924 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:05Z","lastTransitionTime":"2025-11-28T09:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.745299 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs\") pod \"network-metrics-daemon-p69l6\" (UID: \"2a223cc8-af33-4e83-8bfc-2676c5700447\") " pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:05 crc kubenswrapper[4838]: E1128 09:58:05.745415 4838 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 09:58:05 crc kubenswrapper[4838]: E1128 09:58:05.745578 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs podName:2a223cc8-af33-4e83-8bfc-2676c5700447 nodeName:}" failed. No retries permitted until 2025-11-28 09:58:06.745541909 +0000 UTC m=+58.444516119 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs") pod "network-metrics-daemon-p69l6" (UID: "2a223cc8-af33-4e83-8bfc-2676c5700447") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.825362 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.825417 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.825436 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.825462 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.825483 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:05Z","lastTransitionTime":"2025-11-28T09:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.901976 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f"} Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.902036 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006"} Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.903359 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4rv9b" event={"ID":"051f7e1c-2d47-4be9-bbd5-14feec16eb16","Type":"ContainerStarted","Data":"f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3"} Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.928935 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.928993 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.929009 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.929034 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.929050 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:05Z","lastTransitionTime":"2025-11-28T09:58:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI 
configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.942826 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.971790 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:05Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.988397 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-binco
py\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:05Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:05 crc kubenswrapper[4838]: I1128 09:58:05.999313 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:05Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.012269 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.024657 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.032221 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.032317 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.032334 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.032644 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.032743 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:06Z","lastTransitionTime":"2025-11-28T09:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.036009 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.046491 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 
09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.059688 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.076654 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e63
55e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.086988 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.103356 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.114265 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.124182 4838 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSt
atuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.135093 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.135125 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.135137 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.135153 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.135162 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:06Z","lastTransitionTime":"2025-11-28T09:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.136381 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.146782 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.156822 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126
.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.166594 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.177634 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.188007 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.200194 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 
\\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-
pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.212399 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: 
addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 
4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.223893 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.237346 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.237387 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.237400 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.237420 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.237434 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:06Z","lastTransitionTime":"2025-11-28T09:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.242734 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"image
ID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"m
ountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126
.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.252976 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.264015 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.275428 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.286485 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.294598 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.306656 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.316894 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.330307 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-binco
py\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.338964 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.339945 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.339987 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.339999 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.340014 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.340024 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:06Z","lastTransitionTime":"2025-11-28T09:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.352859 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.442779 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.442826 4838 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.442840 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.442859 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.442871 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:06Z","lastTransitionTime":"2025-11-28T09:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.545076 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.545150 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.545168 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.545198 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.545216 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:06Z","lastTransitionTime":"2025-11-28T09:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.562261 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:06 crc kubenswrapper[4838]: E1128 09:58:06.562935 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.648187 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.648565 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.648576 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.648592 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.648604 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:06Z","lastTransitionTime":"2025-11-28T09:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.755391 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs\") pod \"network-metrics-daemon-p69l6\" (UID: \"2a223cc8-af33-4e83-8bfc-2676c5700447\") " pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:06 crc kubenswrapper[4838]: E1128 09:58:06.755551 4838 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 09:58:06 crc kubenswrapper[4838]: E1128 09:58:06.755605 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs podName:2a223cc8-af33-4e83-8bfc-2676c5700447 nodeName:}" failed. No retries permitted until 2025-11-28 09:58:08.755589751 +0000 UTC m=+60.454563921 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs") pod "network-metrics-daemon-p69l6" (UID: "2a223cc8-af33-4e83-8bfc-2676c5700447") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.760307 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.760330 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.760338 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.760390 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.760424 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:06Z","lastTransitionTime":"2025-11-28T09:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.862537 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.862566 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.862575 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.862589 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.862598 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:06Z","lastTransitionTime":"2025-11-28T09:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.909068 4838 generic.go:334] "Generic (PLEG): container finished" podID="3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d" containerID="f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44" exitCode=0 Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.909149 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" event={"ID":"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d","Type":"ContainerDied","Data":"f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44"} Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.911261 4838 generic.go:334] "Generic (PLEG): container finished" podID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerID="9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730" exitCode=0 Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.911345 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerDied","Data":"9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730"} Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.913244 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629"} Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.930480 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.949812 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.965766 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.965823 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.965841 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.965866 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.965883 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:06Z","lastTransitionTime":"2025-11-28T09:58:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.967684 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:06 crc kubenswrapper[4838]: I1128 09:58:06.984492 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.001742 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e63
55e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:06Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.016255 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.041532 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.055685 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.068180 4838 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.068210 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.068218 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.068233 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.068518 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:07Z","lastTransitionTime":"2025-11-28T09:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.070459 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"ru
nning\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.085205 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.098615 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.109110 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.123099 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.140987 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.156282 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c85
7df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.169973 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.172844 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.172901 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.172920 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.172945 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.172961 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:07Z","lastTransitionTime":"2025-11-28T09:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.182924 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.199354 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.221302 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.234244 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.244438 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.256553 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.268728 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.275273 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.275318 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.275393 4838 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.275417 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.275429 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:07Z","lastTransitionTime":"2025-11-28T09:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.286781 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc 
kubenswrapper[4838]: I1128 09:58:07.298732 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.309382 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.321101 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.333134 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.345471 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.356871 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.371416 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e63
55e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.384992 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.385035 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.385047 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.385064 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.385042 4838 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.385077 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:07Z","lastTransitionTime":"2025-11-28T09:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.405786 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55
745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.419087 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.419122 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.419133 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.419148 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.419158 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:07Z","lastTransitionTime":"2025-11-28T09:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.425257 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: E1128 09:58:07.433000 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.436867 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.436911 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.436922 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.436936 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.436945 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:07Z","lastTransitionTime":"2025-11-28T09:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:07 crc kubenswrapper[4838]: E1128 09:58:07.450284 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.454339 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.454390 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.454403 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.454424 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.454440 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:07Z","lastTransitionTime":"2025-11-28T09:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:07 crc kubenswrapper[4838]: E1128 09:58:07.466939 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.470268 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.470309 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.470322 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.470338 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.470349 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:07Z","lastTransitionTime":"2025-11-28T09:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:07 crc kubenswrapper[4838]: E1128 09:58:07.483320 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.486907 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.486958 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.486975 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.486999 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.487016 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:07Z","lastTransitionTime":"2025-11-28T09:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:07 crc kubenswrapper[4838]: E1128 09:58:07.501161 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: E1128 09:58:07.501277 4838 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.502971 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.502992 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.503000 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.503017 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.503028 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:07Z","lastTransitionTime":"2025-11-28T09:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.561383 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.561383 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:07 crc kubenswrapper[4838]: E1128 09:58:07.561538 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.561397 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:07 crc kubenswrapper[4838]: E1128 09:58:07.561635 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:07 crc kubenswrapper[4838]: E1128 09:58:07.561749 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.605499 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.605536 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.605549 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.605567 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.605579 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:07Z","lastTransitionTime":"2025-11-28T09:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.707822 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.707871 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.707882 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.707898 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.707909 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:07Z","lastTransitionTime":"2025-11-28T09:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.810873 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.810932 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.810947 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.810973 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.810986 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:07Z","lastTransitionTime":"2025-11-28T09:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.915602 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.915659 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.915676 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.915701 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.915749 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:07Z","lastTransitionTime":"2025-11-28T09:58:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.924413 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerStarted","Data":"b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776"} Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.924485 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerStarted","Data":"5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146"} Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.924506 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerStarted","Data":"87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa"} Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.924526 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerStarted","Data":"3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa"} Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.924545 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerStarted","Data":"b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6"} Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.924564 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerStarted","Data":"669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72"} Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.930190 4838 generic.go:334] "Generic (PLEG): container finished" podID="3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d" containerID="dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401" exitCode=0 Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.930275 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" event={"ID":"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d","Type":"ContainerDied","Data":"dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401"} Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.954455 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.977574 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:07 crc kubenswrapper[4838]: I1128 09:58:07.992905 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.015255 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.026810 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.026872 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.026890 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.026915 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.026932 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:08Z","lastTransitionTime":"2025-11-28T09:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.029498 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.043374 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.067423 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.092592 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"moun
tPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.108970 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.129453 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.129514 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.129536 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.129560 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.129577 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:08Z","lastTransitionTime":"2025-11-28T09:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.130041 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.145334 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.159055 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.179328 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.198424 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.217854 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e63
55e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.234546 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.234513 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.234593 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.234765 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.234799 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.234812 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:08Z","lastTransitionTime":"2025-11-28T09:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.256201 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55
745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.337477 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.337550 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.337568 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.337592 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.337609 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:08Z","lastTransitionTime":"2025-11-28T09:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.440512 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.440562 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.440575 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.440595 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.440606 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:08Z","lastTransitionTime":"2025-11-28T09:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.543189 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.543765 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.543784 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.543815 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.543834 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:08Z","lastTransitionTime":"2025-11-28T09:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.561261 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:08 crc kubenswrapper[4838]: E1128 09:58:08.561424 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.580340 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\
\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.595744 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.609608 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.620972 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.636548 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.646187 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.646231 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.646241 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.646255 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.646263 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:08Z","lastTransitionTime":"2025-11-28T09:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI 
configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.649481 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.666525 4838 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-
o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":
[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.678477 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.689801 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.703913 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.718009 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.731041 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.746940 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 
\\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-
pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.748509 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.748544 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.748559 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.748576 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.748589 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:08Z","lastTransitionTime":"2025-11-28T09:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.767680 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\
":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.779960 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs\") pod \"network-metrics-daemon-p69l6\" (UID: \"2a223cc8-af33-4e83-8bfc-2676c5700447\") " pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.780034 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: E1128 09:58:08.780135 4838 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 09:58:08 crc kubenswrapper[4838]: E1128 09:58:08.780199 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs podName:2a223cc8-af33-4e83-8bfc-2676c5700447 nodeName:}" failed. No retries permitted until 2025-11-28 09:58:12.780181437 +0000 UTC m=+64.479155607 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs") pod "network-metrics-daemon-p69l6" (UID: "2a223cc8-af33-4e83-8bfc-2676c5700447") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.797105 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\
\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPat
h\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":
true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.808012 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.851459 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.851506 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.851519 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.851537 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.851551 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:08Z","lastTransitionTime":"2025-11-28T09:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.937811 4838 generic.go:334] "Generic (PLEG): container finished" podID="3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d" containerID="f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657" exitCode=0 Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.937899 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" event={"ID":"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d","Type":"ContainerDied","Data":"f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657"} Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.940057 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerStarted","Data":"5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697"} Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.954342 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.954419 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.954441 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.954472 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.954492 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:08Z","lastTransitionTime":"2025-11-28T09:58:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.958915 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.975370 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:08 crc kubenswrapper[4838]: I1128 09:58:08.988808 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.002340 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:09Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.014403 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e63
55e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:09Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.024002 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:09Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.045269 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:09Z 
is after 2025-08-24T17:21:41Z" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.057117 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.057154 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.057191 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.057211 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.057223 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:09Z","lastTransitionTime":"2025-11-28T09:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.061965 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:09Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.079833 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:09Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.097515 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:09Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.115196 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:09Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.130923 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:09Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.143783 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:09Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.159747 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:09Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.159926 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.159962 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.159970 4838 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.159985 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.159993 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:09Z","lastTransitionTime":"2025-11-28T09:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.173612 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:09Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.184728 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:09Z is 
after 2025-08-24T17:21:41Z" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.194599 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:09Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.262226 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.262262 4838 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.262270 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.262285 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.262300 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:09Z","lastTransitionTime":"2025-11-28T09:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.365825 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.366188 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.366200 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.366215 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.366226 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:09Z","lastTransitionTime":"2025-11-28T09:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.384699 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 09:58:09 crc kubenswrapper[4838]: E1128 09:58:09.384824 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 09:58:25.384802769 +0000 UTC m=+77.083776939 (durationBeforeRetry 16s). 
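
Every one of the "Failed to update status for pod" entries above shares the root cause quoted in its own message: the serving certificate of the pod.network-node-identity.openshift.io webhook at 127.0.0.1:9743 expired on 2025-08-24T17:21:41Z, roughly 96 days before the kubelet's current time of 2025-11-28T09:58:09Z. A minimal sketch of confirming that from the node, assuming only that the endpoint named in the log is reachable:

package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	// Probe the webhook endpoint from the log. InsecureSkipVerify is
	// deliberate: the point is to inspect the expired certificate, not
	// to trust it.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Printf("subject=%s notBefore=%s notAfter=%s\n",
		cert.Subject,
		cert.NotBefore.Format(time.RFC3339),
		cert.NotAfter.Format(time.RFC3339))
	if d := time.Since(cert.NotAfter); d > 0 {
		fmt.Printf("certificate expired %s ago\n", d.Round(time.Hour))
	}
}
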
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.384881 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.384919 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.384939 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.384967 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:09 crc kubenswrapper[4838]: E1128 09:58:09.385055 4838 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 09:58:09 crc kubenswrapper[4838]: E1128 09:58:09.385064 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 09:58:09 crc kubenswrapper[4838]: E1128 09:58:09.385086 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 09:58:09 crc kubenswrapper[4838]: E1128 09:58:09.385094 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 09:58:25.385087426 +0000 UTC m=+77.084061596 (durationBeforeRetry 16s). 
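
The UnmountVolume.TearDown failure above is unrelated to the certificate problem: the kubelet cannot find kubevirt.io.hostpath-provisioner among the CSI drivers registered on the node, which normally means the driver's node plugin has not re-registered over the kubelet plugin socket since the restart. One way to see what the node currently has registered is its CSINode object; a sketch, with the node name "crc" taken from the log and the kubeconfig path an assumption:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumed kubeconfig location; adjust for the environment at hand.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// CSINode lists every driver this node's kubelet has accepted a
	// registration for; the unmount can only succeed once the driver
	// shows up here again.
	csiNode, err := cs.StorageV1().CSINodes().Get(context.Background(), "crc", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	for _, d := range csiNode.Spec.Drivers {
		fmt.Println("registered driver:", d.Name)
	}
}
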
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 09:58:09 crc kubenswrapper[4838]: E1128 09:58:09.385101 4838 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:58:09 crc kubenswrapper[4838]: E1128 09:58:09.385101 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 09:58:09 crc kubenswrapper[4838]: E1128 09:58:09.385128 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 09:58:09 crc kubenswrapper[4838]: E1128 09:58:09.385137 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 09:58:25.385125577 +0000 UTC m=+77.084099747 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:58:09 crc kubenswrapper[4838]: E1128 09:58:09.385140 4838 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:58:09 crc kubenswrapper[4838]: E1128 09:58:09.385054 4838 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 09:58:09 crc kubenswrapper[4838]: E1128 09:58:09.385181 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 09:58:25.385172889 +0000 UTC m=+77.084147059 (durationBeforeRetry 16s). 
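
The projected-volume failures above ("object ... not registered") do not mean the objects are missing from the API server; they mean the kubelet's own watch cache has not observed them yet after the restart. Each kube-api-access-* volume needs both kube-root-ca.crt and openshift-service-ca.crt from the pod's namespace, so checking them server-side separates a cache-sync delay from a genuinely absent object. A sketch under the same kubeconfig assumption as above:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/kubeconfig") // assumed path
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	ns := "openshift-network-diagnostics" // namespace from the log
	for _, name := range []string{"kube-root-ca.crt", "openshift-service-ca.crt"} {
		_, err := cs.CoreV1().ConfigMaps(ns).Get(context.Background(), name, metav1.GetOptions{})
		if err != nil {
			fmt.Printf("%s/%s: %v\n", ns, name, err)
			continue
		}
		// Present server-side: the kubelet errors are a cache-sync symptom
		// and should clear once its informers catch up.
		fmt.Printf("%s/%s: present\n", ns, name)
	}
}
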
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 09:58:09 crc kubenswrapper[4838]: E1128 09:58:09.385196 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 09:58:25.385188679 +0000 UTC m=+77.084162859 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.468617 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.468650 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.468658 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.468670 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.468679 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:09Z","lastTransitionTime":"2025-11-28T09:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.562008 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.562151 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.562196 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:09 crc kubenswrapper[4838]: E1128 09:58:09.562154 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
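
Each failed volume operation is requeued with a durationBeforeRetry, 16s at this point in the log, and the m=+77.08... suffix is the kubelet's monotonic offset since process start. A 16s wait is what a doubling backoff from a short base produces on the fourth attempt; a sketch of that pattern, with the 2s base and the cap chosen here purely for illustration rather than read from this log:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Doubling backoff: 2s, 4s, 8s, 16s, ... capped. The fourth attempt
	// lines up with the 16s durationBeforeRetry seen above.
	base, maxDelay := 2*time.Second, 2*time.Minute // assumed values
	d := base
	for attempt := 1; attempt <= 7; attempt++ {
		fmt.Printf("attempt %d: wait %s\n", attempt, d)
		d *= 2
		if d > maxDelay {
			d = maxDelay
		}
	}
}
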
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:09 crc kubenswrapper[4838]: E1128 09:58:09.562259 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:09 crc kubenswrapper[4838]: E1128 09:58:09.562508 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.571376 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.571435 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.571462 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.571495 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.571519 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:09Z","lastTransitionTime":"2025-11-28T09:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.674657 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.674702 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.674712 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.674751 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.674764 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:09Z","lastTransitionTime":"2025-11-28T09:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.777626 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.777694 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.777705 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.777742 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.777755 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:09Z","lastTransitionTime":"2025-11-28T09:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.880986 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.881041 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.881055 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.881073 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.881086 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:09Z","lastTransitionTime":"2025-11-28T09:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.945572 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerStarted","Data":"d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040"} Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.967135 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
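
The terminated cluster-policy-controller output embedded above prints derived leader-election numbers: 4 retries, 30s of tolerated clock skew, a 78s kube-apiserver downtime tolerance, worst non-graceful lease acquisition of 2m43s, and worst graceful acquisition of 26s. All of those are consistent with a LeaseDuration of 137s, a RenewDeadline of 107s, and a RetryPeriod of 26s, assumed here as one tuning that reproduces the printed values:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed leader-election tuning; these reproduce the numbers the
	// controller logs above.
	leaseDuration := 137 * time.Second
	renewDeadline := 107 * time.Second
	retryPeriod := 26 * time.Second

	retries := int(renewDeadline / retryPeriod)     // 4 renew attempts fit in the deadline
	clockSkew := leaseDuration - renewDeadline      // 30s of tolerated skew
	worstNonGraceful := leaseDuration + retryPeriod // 2m43s to reacquire after a crash
	worstGraceful := retryPeriod                    // 26s when the old leader released cleanly
	// One consistent reading of the 78s figure: the retry budget minus
	// the final attempt, (retries-1) * retryPeriod.
	downtimeTolerance := time.Duration(retries-1) * retryPeriod

	fmt.Println(retries, clockSkew, worstNonGraceful, worstGraceful, downtimeTolerance)
}
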
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:09Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.981480 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e63
55e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:09Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.984749 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.984992 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.985234 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.985380 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.985532 4838 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:09Z","lastTransitionTime":"2025-11-28T09:58:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:09 crc kubenswrapper[4838]: I1128 09:58:09.995021 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:09Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.015519 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:10Z 
is after 2025-08-24T17:21:41Z" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.029572 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:10Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.040199 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\
",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:10Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.050070 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:10Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.060854 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:10Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.070070 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:10Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.083507 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:10Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.088140 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.088313 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.088395 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.088476 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.088572 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:10Z","lastTransitionTime":"2025-11-28T09:58:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI 
configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.095754 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:10Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.108692 4838 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0
c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\
":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:10Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.118920 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:10Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.128375 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:10Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.141604 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:10Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.155898 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:10Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.168774 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:10Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.190566 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.190796 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.191022 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.191303 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.191554 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:10Z","lastTransitionTime":"2025-11-28T09:58:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.293912 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.293960 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.293971 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.293990 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.293999 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:10Z","lastTransitionTime":"2025-11-28T09:58:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.398508 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.398693 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.398805 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.398947 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.399018 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:10Z","lastTransitionTime":"2025-11-28T09:58:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.502652 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.503112 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.503132 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.503192 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.503206 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:10Z","lastTransitionTime":"2025-11-28T09:58:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.561589 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6"
Nov 28 09:58:10 crc kubenswrapper[4838]: E1128 09:58:10.562349 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.605530 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.605567 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.605575 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.605589 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.605601 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:10Z","lastTransitionTime":"2025-11-28T09:58:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.707882 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.707917 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.707926 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.707939 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.707949 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:10Z","lastTransitionTime":"2025-11-28T09:58:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.811464 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.811535 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.811555 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.811582 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.811602 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:10Z","lastTransitionTime":"2025-11-28T09:58:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.914951 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.915000 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.915018 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.915042 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.915064 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:10Z","lastTransitionTime":"2025-11-28T09:58:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.957041 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerStarted","Data":"1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8"} Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.961257 4838 generic.go:334] "Generic (PLEG): container finished" podID="3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d" containerID="b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c" exitCode=0 Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.961344 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" event={"ID":"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d","Type":"ContainerDied","Data":"b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c"} Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.976613 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:10Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:10 crc kubenswrapper[4838]: I1128 09:58:10.992809 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:10Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.016739 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\"
:\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:11Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.022493 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.022538 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.022554 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.022574 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.022590 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:11Z","lastTransitionTime":"2025-11-28T09:58:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin 
returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.031036 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:11Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.042421 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:11Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.057354 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:11Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.069078 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:11Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.082988 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:11Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.099300 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:11Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.118839 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e63
55e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:11Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.133187 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:11Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.149011 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.149050 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.149061 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.149076 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.149087 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:11Z","lastTransitionTime":"2025-11-28T09:58:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.156984 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55
745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:11Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.169834 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\
\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:11Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.182503 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:11Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.194761 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:11Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.207317 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:11Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.217901 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:11Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.252226 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.252286 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.252302 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.252327 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.252344 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:11Z","lastTransitionTime":"2025-11-28T09:58:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.356435 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.356490 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.356504 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.356523 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.356537 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:11Z","lastTransitionTime":"2025-11-28T09:58:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.459946 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.460033 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.460053 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.460086 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.460111 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:11Z","lastTransitionTime":"2025-11-28T09:58:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.561145 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.561162 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.561319 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:11 crc kubenswrapper[4838]: E1128 09:58:11.561431 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:11 crc kubenswrapper[4838]: E1128 09:58:11.561572 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:11 crc kubenswrapper[4838]: E1128 09:58:11.561671 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.562692 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.562765 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.562781 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.562808 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.562824 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:11Z","lastTransitionTime":"2025-11-28T09:58:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.665909 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.665982 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.666001 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.666027 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.666044 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:11Z","lastTransitionTime":"2025-11-28T09:58:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.768469 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.768527 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.768547 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.768572 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.768591 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:11Z","lastTransitionTime":"2025-11-28T09:58:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.872354 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.872408 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.872421 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.872438 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.872451 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:11Z","lastTransitionTime":"2025-11-28T09:58:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.966624 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-sft2b" event={"ID":"200cdb91-cc86-40be-a5b6-30f7b9beba6d","Type":"ContainerStarted","Data":"c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac"} Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.974363 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.974419 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.974435 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.974457 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:11 crc kubenswrapper[4838]: I1128 09:58:11.974470 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:11Z","lastTransitionTime":"2025-11-28T09:58:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.077179 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.077220 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.077231 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.077248 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.077279 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:12Z","lastTransitionTime":"2025-11-28T09:58:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.179153 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.179538 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.179556 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.179581 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.179598 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:12Z","lastTransitionTime":"2025-11-28T09:58:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.282425 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.282458 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.282465 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.282478 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.282487 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:12Z","lastTransitionTime":"2025-11-28T09:58:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.385592 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.385634 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.385647 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.385667 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.385680 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:12Z","lastTransitionTime":"2025-11-28T09:58:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.490090 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.490131 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.490142 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.490158 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.490169 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:12Z","lastTransitionTime":"2025-11-28T09:58:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.561962 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:12 crc kubenswrapper[4838]: E1128 09:58:12.562085 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.592596 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.592652 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.592663 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.592678 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.592687 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:12Z","lastTransitionTime":"2025-11-28T09:58:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.695210 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.695274 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.695292 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.695318 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.695338 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:12Z","lastTransitionTime":"2025-11-28T09:58:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.798144 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.798211 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.798236 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.798269 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.798303 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:12Z","lastTransitionTime":"2025-11-28T09:58:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.822266 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs\") pod \"network-metrics-daemon-p69l6\" (UID: \"2a223cc8-af33-4e83-8bfc-2676c5700447\") " pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:12 crc kubenswrapper[4838]: E1128 09:58:12.822592 4838 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 09:58:12 crc kubenswrapper[4838]: E1128 09:58:12.822761 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs podName:2a223cc8-af33-4e83-8bfc-2676c5700447 nodeName:}" failed. No retries permitted until 2025-11-28 09:58:20.822703924 +0000 UTC m=+72.521678174 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs") pod "network-metrics-daemon-p69l6" (UID: "2a223cc8-af33-4e83-8bfc-2676c5700447") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.905073 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.905168 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.905189 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.905229 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.905252 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:12Z","lastTransitionTime":"2025-11-28T09:58:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.988233 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" event={"ID":"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d","Type":"ContainerStarted","Data":"f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607"} Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.995831 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.995907 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:58:12 crc kubenswrapper[4838]: I1128 09:58:12.995996 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.007484 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.007518 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.007530 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.007547 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.007559 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:13Z","lastTransitionTime":"2025-11-28T09:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.017994 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.031614 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.036126 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.036153 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file 
\\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for 
RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.050832 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.070895 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53360b6257759a7c7274680307597ae7ca0e40e9
966c24d50262c25b203a5177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.085846 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.097737 4838 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.111726 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.113570 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.113601 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.113613 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.113630 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.113642 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:13Z","lastTransitionTime":"2025-11-28T09:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.129456 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.138880 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.161750 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.175756 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.188627 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\"
:\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.198953 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.209991 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.215999 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.216038 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.216051 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.216068 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.216080 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:13Z","lastTransitionTime":"2025-11-28T09:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.222422 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.234791 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.246441 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.259910 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f1
1f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.272098 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.283418 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.295020 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.308163 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.318335 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.318363 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.318373 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.318389 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.318399 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:13Z","lastTransitionTime":"2025-11-28T09:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI 
configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.321134 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.342138 4838 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d8
0ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name
\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.353729 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.366278 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.378220 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.390798 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.402438 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 
09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.412772 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.420613 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.420635 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.420643 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.420657 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.420667 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:13Z","lastTransitionTime":"2025-11-28T09:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.425362 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\
":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.435564 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.451330 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53360b6257759a7c7274680307597ae7ca0e40e9
966c24d50262c25b203a5177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.463892 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:13Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.523058 4838 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.523107 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.523119 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.523138 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.523152 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:13Z","lastTransitionTime":"2025-11-28T09:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.561362 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.561377 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:13 crc kubenswrapper[4838]: E1128 09:58:13.561554 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.561512 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:13 crc kubenswrapper[4838]: E1128 09:58:13.561619 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:13 crc kubenswrapper[4838]: E1128 09:58:13.561859 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.625775 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.625822 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.625834 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.625856 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.625870 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:13Z","lastTransitionTime":"2025-11-28T09:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.737061 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.737639 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.737683 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.737881 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.737934 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:13Z","lastTransitionTime":"2025-11-28T09:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.841254 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.841308 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.841317 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.841340 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.841354 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:13Z","lastTransitionTime":"2025-11-28T09:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.943787 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.943838 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.943853 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.943880 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:13 crc kubenswrapper[4838]: I1128 09:58:13.943892 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:13Z","lastTransitionTime":"2025-11-28T09:58:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.005923 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503"} Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.012091 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerStarted","Data":"53360b6257759a7c7274680307597ae7ca0e40e9966c24d50262c25b203a5177"} Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.022652 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.036382 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.046608 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.046645 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.046657 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.046678 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.046691 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:14Z","lastTransitionTime":"2025-11-28T09:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.047408 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.059042 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.070038 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e63
55e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.083279 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.100091 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53360b6257759a7c7274680307597ae7ca0e40e9
966c24d50262c25b203a5177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.114684 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.134168 4838 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.148963 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.149012 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.149029 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.149052 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.149071 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:14Z","lastTransitionTime":"2025-11-28T09:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.154333 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.169315 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.181225 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126
.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.206389 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.226776 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.251356 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\"
:\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.251493 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.251543 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.251561 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.251585 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.251602 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:14Z","lastTransitionTime":"2025-11-28T09:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin 
returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.265276 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.283026 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.302098 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.320667 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.337006 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.351665 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.354667 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.354738 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.354749 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.354766 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.354776 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:14Z","lastTransitionTime":"2025-11-28T09:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.370831 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\
":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.389217 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.414062 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53360b6257759a7c7274680307597ae7ca0e40e9
966c24d50262c25b203a5177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.427245 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.440173 4838 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.457685 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.457754 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.457766 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.457793 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.457806 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:14Z","lastTransitionTime":"2025-11-28T09:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.460861 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.476567 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.490410 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126
.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.510159 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.538667 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.560438 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\"
:\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.562194 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:14 crc kubenswrapper[4838]: E1128 09:58:14.562350 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.563615 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.563648 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.563658 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.563676 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.563688 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:14Z","lastTransitionTime":"2025-11-28T09:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.574537 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09
:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.586770 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-28T09:58:14Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.666028 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.666072 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.666081 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.666094 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.666104 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:14Z","lastTransitionTime":"2025-11-28T09:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.768517 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.768555 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.768564 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.768579 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.768588 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:14Z","lastTransitionTime":"2025-11-28T09:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.870908 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.870962 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.870975 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.870998 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.871013 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:14Z","lastTransitionTime":"2025-11-28T09:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.973299 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.973325 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.973334 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.973348 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:14 crc kubenswrapper[4838]: I1128 09:58:14.973358 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:14Z","lastTransitionTime":"2025-11-28T09:58:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.017579 4838 generic.go:334] "Generic (PLEG): container finished" podID="3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d" containerID="f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607" exitCode=0 Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.017612 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" event={"ID":"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d","Type":"ContainerDied","Data":"f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607"} Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.028242 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod 
\"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:15Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.044835 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:15Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.058040 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:15Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.099587 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mo
untPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:15Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.113096 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.113298 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.113389 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.113461 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeNotReady" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.113520 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:15Z","lastTransitionTime":"2025-11-28T09:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.134568 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:15Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.163997 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:15Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.185206 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:15Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.199214 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:15Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.212402 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"
ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:15Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.218496 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.218553 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.218563 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.218582 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.218596 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:15Z","lastTransitionTime":"2025-11-28T09:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.226109 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ 
-n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"
},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:15Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.243050 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 
4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:15Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.261141 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:15Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.285170 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53360b6257759a7c7274680307597ae7ca0e40e9
966c24d50262c25b203a5177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:15Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.299653 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:15Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.313931 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:15Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.320803 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.320844 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.320855 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.320871 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.320884 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:15Z","lastTransitionTime":"2025-11-28T09:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.328327 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:15Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.344009 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:15Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.424233 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.424318 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.424339 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.424372 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.424395 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:15Z","lastTransitionTime":"2025-11-28T09:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.527459 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.527516 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.527528 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.527546 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.527610 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:15Z","lastTransitionTime":"2025-11-28T09:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.561809 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:15 crc kubenswrapper[4838]: E1128 09:58:15.562336 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.562434 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.562465 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:15 crc kubenswrapper[4838]: E1128 09:58:15.563239 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:15 crc kubenswrapper[4838]: E1128 09:58:15.563550 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.570024 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.630368 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.630430 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.630447 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.630473 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.630490 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:15Z","lastTransitionTime":"2025-11-28T09:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.733076 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.733135 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.733145 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.733168 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.733182 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:15Z","lastTransitionTime":"2025-11-28T09:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.835089 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.835142 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.835153 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.835176 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.835197 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:15Z","lastTransitionTime":"2025-11-28T09:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.938065 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.938114 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.938125 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.938144 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:15 crc kubenswrapper[4838]: I1128 09:58:15.938158 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:15Z","lastTransitionTime":"2025-11-28T09:58:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.024588 4838 generic.go:334] "Generic (PLEG): container finished" podID="3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d" containerID="0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af" exitCode=0 Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.024674 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" event={"ID":"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d","Type":"ContainerDied","Data":"0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af"} Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.040333 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.040372 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.040393 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.040408 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.040419 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:16Z","lastTransitionTime":"2025-11-28T09:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.040503 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:16Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.062230 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:16Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.072542 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:16Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.083888 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"
},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:16Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.095880 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:16Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.106610 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:16Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.117051 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:16Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.130144 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:16Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.142935 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:16Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.143476 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.143532 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.143545 4838 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.143563 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.143575 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:16Z","lastTransitionTime":"2025-11-28T09:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.155810 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"
},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:16Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.177428 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\
":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:16Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.190394 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de645de9-a5cd-4075-8bfd-402a619ea73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17e965aad7643d62c651c1e652be45bd914cfe3f14a0a6f43e4e4376b4cb7be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef
318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:16Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.203319 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:16Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.214598 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:16Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.232032 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\
"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started
\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53360b6257759a7c7274680307597ae7ca0e40e9966c24d50262c25b203a5177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadO
nly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:16Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.245672 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:16Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.250782 4838 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.250854 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.250868 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.251040 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.251279 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:16Z","lastTransitionTime":"2025-11-28T09:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.258274 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:16Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.271631 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e63
55e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:16Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.354274 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.354316 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.354327 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.354344 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.354357 4838 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:16Z","lastTransitionTime":"2025-11-28T09:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.457416 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.457461 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.457475 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.457502 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.457520 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:16Z","lastTransitionTime":"2025-11-28T09:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.560422 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.560462 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.560474 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.560492 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.560503 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:16Z","lastTransitionTime":"2025-11-28T09:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.562038 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:16 crc kubenswrapper[4838]: E1128 09:58:16.562165 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.663107 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.663143 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.663157 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.663175 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.663186 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:16Z","lastTransitionTime":"2025-11-28T09:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.766154 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.766202 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.766218 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.766242 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.766259 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:16Z","lastTransitionTime":"2025-11-28T09:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.868800 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.868885 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.868906 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.868930 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.868947 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:16Z","lastTransitionTime":"2025-11-28T09:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.971055 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.971101 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.971111 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.971126 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:16 crc kubenswrapper[4838]: I1128 09:58:16.971137 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:16Z","lastTransitionTime":"2025-11-28T09:58:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.073703 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.073757 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.073778 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.073793 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.073802 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:17Z","lastTransitionTime":"2025-11-28T09:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.176554 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.176606 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.176619 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.176638 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.176654 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:17Z","lastTransitionTime":"2025-11-28T09:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.279231 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.279309 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.279334 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.279365 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.279403 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:17Z","lastTransitionTime":"2025-11-28T09:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.382313 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.382353 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.382363 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.382376 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.382385 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:17Z","lastTransitionTime":"2025-11-28T09:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.485027 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.485071 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.485083 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.485099 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.485111 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:17Z","lastTransitionTime":"2025-11-28T09:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.508125 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.508164 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.508176 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.508196 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.508207 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:17Z","lastTransitionTime":"2025-11-28T09:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:17 crc kubenswrapper[4838]: E1128 09:58:17.531760 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:17Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.536928 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.536968 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.536979 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.536994 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.537004 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:17Z","lastTransitionTime":"2025-11-28T09:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:17 crc kubenswrapper[4838]: E1128 09:58:17.553245 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:17Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.557261 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.557300 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
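Every failed status update above shares one root cause, spelled out in the error trailer: the serving certificate of the node.network-node-identity.openshift.io webhook expired on 2025-08-24T17:21:41Z, while the node clock reads 2025-11-28T09:58:17Z. The following is a minimal standalone Go sketch (not kubelet source) of the client-side validity check that yields "x509: certificate has expired or is not yet valid"; the server.crt path is a hypothetical stand-in for wherever the webhook's serving certificate is actually mounted.

    // certcheck.go - a sketch of the TLS-client check behind the error above:
    // compare the server certificate's validity window against the current time.
    package main

    import (
    	"crypto/x509"
    	"encoding/pem"
    	"fmt"
    	"log"
    	"os"
    	"time"
    )

    func main() {
    	// Hypothetical path; substitute the webhook's real serving cert.
    	data, err := os.ReadFile("server.crt")
    	if err != nil {
    		log.Fatal(err)
    	}
    	block, _ := pem.Decode(data)
    	if block == nil {
    		log.Fatal("no PEM block found")
    	}
    	cert, err := x509.ParseCertificate(block.Bytes)
    	if err != nil {
    		log.Fatal(err)
    	}
    	now := time.Now().UTC()
    	switch {
    	case now.After(cert.NotAfter):
    		// The branch this log keeps hitting:
    		// current time 2025-11-28T09:58:17Z is after 2025-08-24T17:21:41Z.
    		fmt.Printf("expired: current time %s is after %s\n",
    			now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
    	case now.Before(cert.NotBefore):
    		fmt.Printf("not yet valid: current time %s is before %s\n",
    			now.Format(time.RFC3339), cert.NotBefore.Format(time.RFC3339))
    	default:
    		fmt.Printf("valid until %s\n", cert.NotAfter.Format(time.RFC3339))
    	}
    }

Run against the webhook's certificate, this would print the same "expired" message that the API server is relaying back to the kubelet here.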
event="NodeHasNoDiskPressure" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.557316 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.557336 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.557348 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:17Z","lastTransitionTime":"2025-11-28T09:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.560990 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.561014 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.561052 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:17 crc kubenswrapper[4838]: E1128 09:58:17.561085 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:17 crc kubenswrapper[4838]: E1128 09:58:17.561175 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:17 crc kubenswrapper[4838]: E1128 09:58:17.561280 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:17 crc kubenswrapper[4838]: E1128 09:58:17.575176 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:17Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.578784 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.578834 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
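The second recurring condition, NetworkReady=false with "no CNI configuration file in /etc/kubernetes/cni/net.d/", persists until the cluster network plugin writes a network definition into that directory; the sandboxes for networking-console-plugin, network-check-source, and network-check-target cannot start before then. A rough Go sketch of the readiness test, under the assumption that the runtime simply looks for *.conf, *.conflist, or *.json files there, in the style of libcni config loading:

    // cnicheck.go - a sketch, not CRI-O's actual implementation: report not-ready
    // until the CNI config directory contains at least one network definition.
    package main

    import (
    	"fmt"
    	"log"
    	"os"
    	"path/filepath"
    )

    func main() {
    	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log
    	entries, err := os.ReadDir(confDir)
    	if err != nil {
    		log.Fatal(err)
    	}
    	var found []string
    	for _, e := range entries {
    		if e.IsDir() {
    			continue
    		}
    		switch filepath.Ext(e.Name()) {
    		case ".conf", ".conflist", ".json":
    			found = append(found, filepath.Join(confDir, e.Name()))
    		}
    	}
    	if len(found) == 0 {
    		fmt.Println("no CNI configuration file found; network plugin not ready")
    		return
    	}
    	fmt.Println("CNI config present:", found)
    }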
event="NodeHasNoDiskPressure" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.578851 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.578874 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.578892 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:17Z","lastTransitionTime":"2025-11-28T09:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:17 crc kubenswrapper[4838]: E1128 09:58:17.595506 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:17Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.599872 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.599895 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
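The err string quotes the body the kubelet tried to PATCH to the node's status subresource: a strategic merge patch in which $setElementOrder/conditions pins the ordering of the conditions array while each condition merges on its type key. A short sketch assembling an equivalent, heavily abbreviated patch body; the field values are copied from the log, and the single Ready condition stands in for all four:

    // patchsketch.go - a sketch of the shape of the node-status patch quoted
    // above, not the kubelet's own marshalling code.
    package main

    import (
    	"encoding/json"
    	"fmt"
    	"log"
    )

    func main() {
    	patch := map[string]any{
    		"status": map[string]any{
    			// Directive understood by strategic-merge-patch: list order.
    			"$setElementOrder/conditions": []map[string]string{
    				{"type": "MemoryPressure"},
    				{"type": "DiskPressure"},
    				{"type": "PIDPressure"},
    				{"type": "Ready"},
    			},
    			// Each entry merges into the existing list by its "type" key.
    			"conditions": []map[string]string{
    				{
    					"type":               "Ready",
    					"status":             "False",
    					"reason":             "KubeletNotReady",
    					"lastHeartbeatTime":  "2025-11-28T09:58:17Z",
    					"lastTransitionTime": "2025-11-28T09:58:17Z",
    				},
    			},
    		},
    	}
    	body, err := json.Marshal(patch)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// The kubelet submits a body like this as a strategic merge patch to
    	// the node's /status subresource; here we only print it.
    	fmt.Println(string(body))
    }

The webhook failure happens while the API server admits this patch, which is why the whole payload is echoed back inside the kubelet's error message.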
event="NodeHasNoDiskPressure" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.599904 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.599919 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.599931 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:17Z","lastTransitionTime":"2025-11-28T09:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:17 crc kubenswrapper[4838]: E1128 09:58:17.611709 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:17Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:17 crc kubenswrapper[4838]: E1128 09:58:17.611855 4838 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.614348 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
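The fifth consecutive patch failure (09:58:17.611709) is followed by "Unable to update node status ... exceeds retry count": each sync is bounded by a fixed retry budget (nodeStatusUpdateRetry is 5 in the upstream kubelet source, which matches the five errors above), after which the kubelet gives up until its next periodic node-status sync. A schematic sketch of that loop shape, not the actual kubelet implementation:

    // retrysketch.go - illustrates the bounded retry visible in this log;
    // every attempt fails here the way the log does, so the budget exhausts.
    package main

    import (
    	"errors"
    	"fmt"
    )

    const nodeStatusUpdateRetry = 5 // budget used by the upstream kubelet

    func tryUpdateNodeStatus() error {
    	// Stand-in for the real PATCH to the node's /status subresource.
    	return errors.New("failed calling webhook: certificate has expired")
    }

    func updateNodeStatus() error {
    	for i := 0; i < nodeStatusUpdateRetry; i++ {
    		if err := tryUpdateNodeStatus(); err != nil {
    			fmt.Printf("Error updating node status, will retry: %v\n", err)
    			continue
    		}
    		return nil
    	}
    	return errors.New("update node status exceeds retry count")
    }

    func main() {
    	if err := updateNodeStatus(); err != nil {
    		fmt.Println("Unable to update node status:", err)
    	}
    }

Because the failure is a hard admission error rather than a transient one, every cycle burns its full budget, which is why the same five-error block repeats throughout this log.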
event="NodeHasSufficientMemory" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.614398 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.614410 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.614433 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.614447 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:17Z","lastTransitionTime":"2025-11-28T09:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.718392 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.718433 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.718445 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.718465 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.718478 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:17Z","lastTransitionTime":"2025-11-28T09:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.821661 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.821813 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.821837 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.821906 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.821924 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:17Z","lastTransitionTime":"2025-11-28T09:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.926092 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.926131 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.926139 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.926153 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:17 crc kubenswrapper[4838]: I1128 09:58:17.926163 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:17Z","lastTransitionTime":"2025-11-28T09:58:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.029604 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.029650 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.029660 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.029676 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.029686 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:18Z","lastTransitionTime":"2025-11-28T09:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.133207 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.133316 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.133336 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.133358 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.133374 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:18Z","lastTransitionTime":"2025-11-28T09:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.236019 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.236095 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.236105 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.236147 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.236166 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:18Z","lastTransitionTime":"2025-11-28T09:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.340128 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.340203 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.340222 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.340279 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.340297 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:18Z","lastTransitionTime":"2025-11-28T09:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.444408 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.444456 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.444466 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.444486 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.444496 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:18Z","lastTransitionTime":"2025-11-28T09:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.547985 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.548107 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.548122 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.548148 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.548164 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:18Z","lastTransitionTime":"2025-11-28T09:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.561909 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:18 crc kubenswrapper[4838]: E1128 09:58:18.562087 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.577304 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.596414 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e63
55e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.610611 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.631053 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53360b6257759a7c7274680307597ae7ca0e40e9
966c24d50262c25b203a5177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.643112 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.650672 4838 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.650741 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.650759 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.650787 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.650801 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:18Z","lastTransitionTime":"2025-11-28T09:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.657137 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\"
:\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.671871 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.690230 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.701743 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.716591 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.730129 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.744599 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.753519 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.753569 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:18 crc 
kubenswrapper[4838]: I1128 09:58:18.753580 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.753598 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.753610 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:18Z","lastTransitionTime":"2025-11-28T09:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.756252 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.767776 4838 
status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.778569 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de645de9-a5cd-4075-8bfd-402a619ea73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17e965aad7643d62c651c1e652be45bd914cfe3f14a0a6f43e4e4376b4cb7be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.789245 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.803277 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.818629 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.856859 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.856936 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.856951 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.856969 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.856982 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:18Z","lastTransitionTime":"2025-11-28T09:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.885309 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.899153 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.915759 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.931093 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.943287 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.960066 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.960124 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.960180 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.960205 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.960222 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:18Z","lastTransitionTime":"2025-11-28T09:58:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.960249 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not 
yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.978837 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:18 crc kubenswrapper[4838]: I1128 09:58:18.998969 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/o
penshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\
"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.011996 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.024326 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.035210 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de645de9-a5cd-4075-8bfd-402a619ea73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17e965aad7643d62c651c1e652be45bd914cfe3f14a0a6f43e4e4376b4cb7be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.043011 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" event={"ID":"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d","Type":"ContainerStarted","Data":"658172db64e44d752eb06fe2788dda717db3fc2e672b073a2bdf159a16fd901f"} 
Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.045147 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gmhsj_41b01f7d-5c75-49de-86f7-87e04bf71194/ovnkube-controller/0.log" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.048156 4838 generic.go:334] "Generic (PLEG): container finished" podID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerID="53360b6257759a7c7274680307597ae7ca0e40e9966c24d50262c25b203a5177" exitCode=1 Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.048188 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerDied","Data":"53360b6257759a7c7274680307597ae7ca0e40e9966c24d50262c25b203a5177"} Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.048609 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.048814 4838 scope.go:117] "RemoveContainer" containerID="53360b6257759a7c7274680307597ae7ca0e40e9966c24d50262c25b203a5177" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.063004 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.063046 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.063056 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.063073 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.063084 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:19Z","lastTransitionTime":"2025-11-28T09:58:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.064918 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.082141 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadO
nly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.095866 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.116015 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPat
h\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.130364 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.150945 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53360b6257759a7c7274680307597ae7ca0e40e9
966c24d50262c25b203a5177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.162117 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.166889 4838 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.166953 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.166968 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.166994 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.167007 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:19Z","lastTransitionTime":"2025-11-28T09:58:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.171421 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de645de9-a5cd-4075-8bfd-402a619ea73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17e965aad7643d62c651c1e652be45bd914cfe3f14a0a6f43e4e4376b4cb7be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCo
unt\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.183292 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.227297 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.237559 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.250660 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.263058 4838 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.269870 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.269910 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.269919 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.269939 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.269953 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:19Z","lastTransitionTime":"2025-11-28T09:58:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.278346 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:5
7:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.290109 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.314024 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53360b6257759a7c7274680307597ae7ca0e40e9
966c24d50262c25b203a5177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53360b6257759a7c7274680307597ae7ca0e40e9966c24d50262c25b203a5177\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"message\\\":\\\"s/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 09:58:16.670401 6284 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1128 09:58:16.670903 6284 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 09:58:16.670954 6284 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 09:58:16.671581 6284 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 09:58:16.671659 6284 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 09:58:16.671666 6284 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 09:58:16.671699 6284 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 09:58:16.671739 6284 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 09:58:16.671750 6284 factory.go:656] Stopping watch factory\\\\nI1128 09:58:16.671769 6284 ovnkube.go:599] Stopped 
ovnkube\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.325920 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\
\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.337114 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8
dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.349955 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.367444 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.372333 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.372449 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.372508 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.372566 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.372708 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:19Z","lastTransitionTime":"2025-11-28T09:58:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.380918 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.394779 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.408274 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.425579 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://658172db64e44d752eb06fe2788dda717db3fc2e672b073a2bdf159a16fd901f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.437398 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.475196 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.475264 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.475288 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.475315 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.475335 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:19Z","lastTransitionTime":"2025-11-28T09:58:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.561125 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.561125 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:19 crc kubenswrapper[4838]: E1128 09:58:19.561354 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:19 crc kubenswrapper[4838]: E1128 09:58:19.561466 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.561677 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:19 crc kubenswrapper[4838]: E1128 09:58:19.562077 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.577930 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.577959 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.577967 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.577981 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.577991 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:19Z","lastTransitionTime":"2025-11-28T09:58:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.680959 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.681296 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.681437 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.681574 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.681713 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:19Z","lastTransitionTime":"2025-11-28T09:58:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.784301 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.784576 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.784657 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.784753 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.784833 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:19Z","lastTransitionTime":"2025-11-28T09:58:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.887643 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.887681 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.887689 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.887704 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.887732 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:19Z","lastTransitionTime":"2025-11-28T09:58:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.989575 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.989634 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.989652 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.989674 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:19 crc kubenswrapper[4838]: I1128 09:58:19.989690 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:19Z","lastTransitionTime":"2025-11-28T09:58:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.092205 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.092242 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.092254 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.092271 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.092285 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:20Z","lastTransitionTime":"2025-11-28T09:58:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.195131 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.195190 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.195209 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.195234 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.195251 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:20Z","lastTransitionTime":"2025-11-28T09:58:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.298318 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.298700 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.298733 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.298752 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.298764 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:20Z","lastTransitionTime":"2025-11-28T09:58:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.401810 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.401858 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.401871 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.401888 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.401904 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:20Z","lastTransitionTime":"2025-11-28T09:58:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.504369 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.504395 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.504405 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.504417 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.504425 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:20Z","lastTransitionTime":"2025-11-28T09:58:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.562254 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:20 crc kubenswrapper[4838]: E1128 09:58:20.562578 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.606622 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.606703 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.606759 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.606795 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.606819 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:20Z","lastTransitionTime":"2025-11-28T09:58:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.710145 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.710199 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.710213 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.710240 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.710255 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:20Z","lastTransitionTime":"2025-11-28T09:58:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.812894 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.812940 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.812949 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.812963 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.812972 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:20Z","lastTransitionTime":"2025-11-28T09:58:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.839866 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs\") pod \"network-metrics-daemon-p69l6\" (UID: \"2a223cc8-af33-4e83-8bfc-2676c5700447\") " pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:20 crc kubenswrapper[4838]: E1128 09:58:20.840065 4838 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 09:58:20 crc kubenswrapper[4838]: E1128 09:58:20.840189 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs podName:2a223cc8-af33-4e83-8bfc-2676c5700447 nodeName:}" failed. No retries permitted until 2025-11-28 09:58:36.840165286 +0000 UTC m=+88.539139626 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs") pod "network-metrics-daemon-p69l6" (UID: "2a223cc8-af33-4e83-8bfc-2676c5700447") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.916210 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.916280 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.916295 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.916322 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:20 crc kubenswrapper[4838]: I1128 09:58:20.916339 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:20Z","lastTransitionTime":"2025-11-28T09:58:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.018949 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.018995 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.019003 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.019022 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.019036 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:21Z","lastTransitionTime":"2025-11-28T09:58:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.059771 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gmhsj_41b01f7d-5c75-49de-86f7-87e04bf71194/ovnkube-controller/0.log" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.063272 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerStarted","Data":"33add8cb97a3d547f8fbfa22a35902ceb364337862331a17551cab1a60de5dae"} Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.063861 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.076497 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:21Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.090793 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:21Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.099524 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:21Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.111311 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"
},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:21Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.121914 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.121966 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.121983 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.122007 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.122020 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:21Z","lastTransitionTime":"2025-11-28T09:58:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.124739 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://658172db64e44d752eb06fe2788dda717db3fc2e672b073a2bdf159a16fd901f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:21Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.136688 4838 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:21Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.149321 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:21Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.165764 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:21Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.176063 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:21Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.187517 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:21Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.201213 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:21Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.217949 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de645de9-a5cd-4075-8bfd-402a619ea73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17e965aad7643d62c651c1e652be45bd914cfe3f14a0a6f43e4e4376b4cb7be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:21Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.224754 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.224814 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.224825 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.224839 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.224849 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:21Z","lastTransitionTime":"2025-11-28T09:58:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.231214 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:21Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.242917 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:21Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.259182 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\
"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started
\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33add8cb97a3d547f8fbfa22a35902ceb364337862331a17551cab1a60de5dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53360b6257759a7c7274680307597ae7ca0e40e9966c24d50262c25b203a5177\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"message\\\":\\\"s/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 09:58:16.670401 6284 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1128 09:58:16.670903 6284 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 09:58:16.670954 6284 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 09:58:16.671581 6284 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 09:58:16.671659 6284 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 09:58:16.671666 6284 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 09:58:16.671699 6284 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 09:58:16.671739 6284 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 09:58:16.671750 6284 factory.go:656] Stopping watch factory\\\\nI1128 09:58:16.671769 6284 ovnkube.go:599] Stopped 
ovnkube\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:21Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.269706 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:21Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.282514 4838 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:21Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.296835 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPat
h\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:21Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.327629 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.327680 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.327691 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.327708 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.327734 4838 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:21Z","lastTransitionTime":"2025-11-28T09:58:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.430200 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.430274 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.430293 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.430317 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.430334 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:21Z","lastTransitionTime":"2025-11-28T09:58:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.533756 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.533818 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.533835 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.533863 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.533880 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:21Z","lastTransitionTime":"2025-11-28T09:58:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.561395 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.561447 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.561426 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:21 crc kubenswrapper[4838]: E1128 09:58:21.561569 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:21 crc kubenswrapper[4838]: E1128 09:58:21.561745 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:21 crc kubenswrapper[4838]: E1128 09:58:21.561806 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.637072 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.637140 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.637161 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.637189 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.637207 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:21Z","lastTransitionTime":"2025-11-28T09:58:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.740240 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.740308 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.740319 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.740335 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.740346 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:21Z","lastTransitionTime":"2025-11-28T09:58:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.842524 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.842568 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.842580 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.842598 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.842612 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:21Z","lastTransitionTime":"2025-11-28T09:58:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.945572 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.945624 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.945635 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.945654 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:21 crc kubenswrapper[4838]: I1128 09:58:21.945669 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:21Z","lastTransitionTime":"2025-11-28T09:58:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.049371 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.049430 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.049449 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.049476 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.049494 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:22Z","lastTransitionTime":"2025-11-28T09:58:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.153180 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.153248 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.153270 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.153303 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.153325 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:22Z","lastTransitionTime":"2025-11-28T09:58:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.257315 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.257395 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.257413 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.257901 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.257961 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:22Z","lastTransitionTime":"2025-11-28T09:58:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.360931 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.360975 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.360991 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.361012 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.361032 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:22Z","lastTransitionTime":"2025-11-28T09:58:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.463956 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.464003 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.464014 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.464038 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.464052 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:22Z","lastTransitionTime":"2025-11-28T09:58:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.562195 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:22 crc kubenswrapper[4838]: E1128 09:58:22.562798 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.573620 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.573710 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.573750 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.573786 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.573809 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:22Z","lastTransitionTime":"2025-11-28T09:58:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.676748 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.676804 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.676822 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.676850 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.676867 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:22Z","lastTransitionTime":"2025-11-28T09:58:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.779839 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.779882 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.779892 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.779912 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.779923 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:22Z","lastTransitionTime":"2025-11-28T09:58:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.883234 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.883331 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.883352 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.883386 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.883411 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:22Z","lastTransitionTime":"2025-11-28T09:58:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.986635 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.986683 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.986700 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.986758 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:22 crc kubenswrapper[4838]: I1128 09:58:22.986779 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:22Z","lastTransitionTime":"2025-11-28T09:58:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.070919 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gmhsj_41b01f7d-5c75-49de-86f7-87e04bf71194/ovnkube-controller/1.log" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.071367 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gmhsj_41b01f7d-5c75-49de-86f7-87e04bf71194/ovnkube-controller/0.log" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.073623 4838 generic.go:334] "Generic (PLEG): container finished" podID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerID="33add8cb97a3d547f8fbfa22a35902ceb364337862331a17551cab1a60de5dae" exitCode=1 Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.073664 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerDied","Data":"33add8cb97a3d547f8fbfa22a35902ceb364337862331a17551cab1a60de5dae"} Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.073699 4838 scope.go:117] "RemoveContainer" containerID="53360b6257759a7c7274680307597ae7ca0e40e9966c24d50262c25b203a5177" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.074427 4838 scope.go:117] "RemoveContainer" containerID="33add8cb97a3d547f8fbfa22a35902ceb364337862331a17551cab1a60de5dae" Nov 28 09:58:23 crc kubenswrapper[4838]: E1128 09:58:23.074585 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-gmhsj_openshift-ovn-kubernetes(41b01f7d-5c75-49de-86f7-87e04bf71194)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.089778 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.089813 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.089822 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.089834 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.089843 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:23Z","lastTransitionTime":"2025-11-28T09:58:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.097322 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:23Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.114954 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://658172db64e44d752eb06fe2788dda717db3fc2e672b073a2bdf159a16fd901f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:23Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.129580 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:23Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.145649 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:23Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.164902 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:23Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.185200 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:23Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.192140 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.192190 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.192201 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.192219 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.192231 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:23Z","lastTransitionTime":"2025-11-28T09:58:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.204046 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:23Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.217967 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadO
nly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:23Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.229706 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de645de9-a5cd-4075-8bfd-402a619ea73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17e965aad7643d62c651c1e652be45bd914cfe3f14a0a6f43e4e4376b4cb7be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kub
elet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:23Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.244770 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\
":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:23Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.257923 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:23Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.279360 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33add8cb97a3d547f8fbfa22a35902ceb3643378
62331a17551cab1a60de5dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53360b6257759a7c7274680307597ae7ca0e40e9966c24d50262c25b203a5177\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"message\\\":\\\"s/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 09:58:16.670401 6284 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1128 09:58:16.670903 6284 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 09:58:16.670954 6284 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 09:58:16.671581 6284 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 09:58:16.671659 6284 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 09:58:16.671666 6284 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 09:58:16.671699 6284 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 09:58:16.671739 6284 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 09:58:16.671750 6284 factory.go:656] Stopping watch factory\\\\nI1128 09:58:16.671769 6284 ovnkube.go:599] Stopped ovnkube\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33add8cb97a3d547f8fbfa22a35902ceb364337862331a17551cab1a60de5dae\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:22Z\\\",\\\"message\\\":\\\"9:58:21.338450 6507 services_controller.go:452] Built service openshift-machine-config-operator/machine-config-daemon per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:21.338487 6507 services_controller.go:453] Built service openshift-machine-config-operator/machine-config-daemon template LB for network=default: []services.LB{}\\\\nI1128 09:58:21.338500 6507 services_controller.go:454] Service openshift-machine-config-operator/machine-config-daemon for network=default has 2 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI1128 09:58:21.338503 6507 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-controllers\\\\\\\"}\\\\nI1128 09:58:21.338518 6507 services_controller.go:360] Finished syncing service machine-api-controllers on namespace openshift-machine-api for network=default : 2.046583ms\\\\nF1128 09:58:21.338532 6507 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler 
{0x1e60340 0x1e60020 0x1e5ffc0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\"
:[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:23Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.294762 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:23Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.294872 4838 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.294929 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.294943 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.294970 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.294988 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:23Z","lastTransitionTime":"2025-11-28T09:58:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.308119 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:23Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.319623 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:23Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.332696 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:23Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.344323 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:23Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.354813 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"
},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:23Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.399442 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.399503 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.399516 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.399541 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.399556 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:23Z","lastTransitionTime":"2025-11-28T09:58:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.503509 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.503574 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.503591 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.503616 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.503633 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:23Z","lastTransitionTime":"2025-11-28T09:58:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.561186 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.561261 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.561344 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:23 crc kubenswrapper[4838]: E1128 09:58:23.561540 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:23 crc kubenswrapper[4838]: E1128 09:58:23.561931 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:23 crc kubenswrapper[4838]: E1128 09:58:23.562199 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.606077 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.606136 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.606146 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.606167 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.606181 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:23Z","lastTransitionTime":"2025-11-28T09:58:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.709195 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.709256 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.709266 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.709286 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.709298 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:23Z","lastTransitionTime":"2025-11-28T09:58:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.812396 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.812464 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.812477 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.812503 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.812521 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:23Z","lastTransitionTime":"2025-11-28T09:58:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.916599 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.917108 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.917181 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.917290 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:23 crc kubenswrapper[4838]: I1128 09:58:23.917358 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:23Z","lastTransitionTime":"2025-11-28T09:58:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.020538 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.020572 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.020582 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.020597 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.020610 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:24Z","lastTransitionTime":"2025-11-28T09:58:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.079548 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gmhsj_41b01f7d-5c75-49de-86f7-87e04bf71194/ovnkube-controller/1.log" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.122933 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.122976 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.122987 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.123001 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.123012 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:24Z","lastTransitionTime":"2025-11-28T09:58:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.225756 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.225801 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.225811 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.225829 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.225841 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:24Z","lastTransitionTime":"2025-11-28T09:58:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.328693 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.328808 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.328835 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.328868 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.328893 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:24Z","lastTransitionTime":"2025-11-28T09:58:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.432175 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.432227 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.432242 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.432266 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.432282 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:24Z","lastTransitionTime":"2025-11-28T09:58:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.534782 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.534831 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.534847 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.534869 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.534885 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:24Z","lastTransitionTime":"2025-11-28T09:58:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.564150 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:24 crc kubenswrapper[4838]: E1128 09:58:24.564397 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.637916 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.637977 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.638000 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.638028 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.638052 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:24Z","lastTransitionTime":"2025-11-28T09:58:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.741636 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.741677 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.741692 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.741709 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.741747 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:24Z","lastTransitionTime":"2025-11-28T09:58:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.844846 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.844900 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.844916 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.844942 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.844961 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:24Z","lastTransitionTime":"2025-11-28T09:58:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.948297 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.948386 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.948398 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.948420 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:24 crc kubenswrapper[4838]: I1128 09:58:24.948432 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:24Z","lastTransitionTime":"2025-11-28T09:58:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.050969 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.051020 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.051033 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.051052 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.051063 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:25Z","lastTransitionTime":"2025-11-28T09:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.154574 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.154621 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.154632 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.154655 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.154667 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:25Z","lastTransitionTime":"2025-11-28T09:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.257987 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.258019 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.258026 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.258039 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.258048 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:25Z","lastTransitionTime":"2025-11-28T09:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.360821 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.360885 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.360901 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.360926 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.360950 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:25Z","lastTransitionTime":"2025-11-28T09:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.387586 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.387751 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:25 crc kubenswrapper[4838]: E1128 09:58:25.387771 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 09:58:57.387742477 +0000 UTC m=+109.086716657 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.387829 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:25 crc kubenswrapper[4838]: E1128 09:58:25.387875 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 09:58:25 crc kubenswrapper[4838]: E1128 09:58:25.387895 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 09:58:25 crc kubenswrapper[4838]: E1128 09:58:25.387908 4838 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:58:25 crc kubenswrapper[4838]: E1128 09:58:25.387955 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 09:58:57.387937482 +0000 UTC m=+109.086911752 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.387877 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:25 crc kubenswrapper[4838]: E1128 09:58:25.387987 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.387999 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:25 crc kubenswrapper[4838]: E1128 09:58:25.388005 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 09:58:25 crc kubenswrapper[4838]: E1128 09:58:25.388039 4838 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:58:25 crc kubenswrapper[4838]: E1128 09:58:25.388070 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 09:58:57.388062195 +0000 UTC m=+109.087036365 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:58:25 crc kubenswrapper[4838]: E1128 09:58:25.388093 4838 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 09:58:25 crc kubenswrapper[4838]: E1128 09:58:25.388150 4838 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 09:58:25 crc kubenswrapper[4838]: E1128 09:58:25.388194 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 09:58:57.388167297 +0000 UTC m=+109.087141517 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 09:58:25 crc kubenswrapper[4838]: E1128 09:58:25.388244 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 09:58:57.388209458 +0000 UTC m=+109.087183648 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.463843 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.463881 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.463890 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.463907 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.463916 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:25Z","lastTransitionTime":"2025-11-28T09:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.561097 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.561153 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.561113 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:25 crc kubenswrapper[4838]: E1128 09:58:25.561273 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:25 crc kubenswrapper[4838]: E1128 09:58:25.561354 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:25 crc kubenswrapper[4838]: E1128 09:58:25.561408 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.565899 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.565953 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.565971 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.565993 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.566017 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:25Z","lastTransitionTime":"2025-11-28T09:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.669246 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.669291 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.669304 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.669323 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.669341 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:25Z","lastTransitionTime":"2025-11-28T09:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.771615 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.771648 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.771658 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.771670 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.771677 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:25Z","lastTransitionTime":"2025-11-28T09:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.874841 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.874899 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.874917 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.874942 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.874960 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:25Z","lastTransitionTime":"2025-11-28T09:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.977851 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.977909 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.977936 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.977978 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:25 crc kubenswrapper[4838]: I1128 09:58:25.978003 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:25Z","lastTransitionTime":"2025-11-28T09:58:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.081199 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.081500 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.081641 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.081831 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.081978 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:26Z","lastTransitionTime":"2025-11-28T09:58:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.185240 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.185586 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.185754 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.185954 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.186101 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:26Z","lastTransitionTime":"2025-11-28T09:58:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.289297 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.289635 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.289935 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.290123 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.290274 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:26Z","lastTransitionTime":"2025-11-28T09:58:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.393564 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.394528 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.394669 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.395003 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.395141 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:26Z","lastTransitionTime":"2025-11-28T09:58:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.497916 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.497974 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.497991 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.498018 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.498035 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:26Z","lastTransitionTime":"2025-11-28T09:58:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.562204 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:26 crc kubenswrapper[4838]: E1128 09:58:26.562398 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.600296 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.600343 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.600366 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.600394 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.600415 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:26Z","lastTransitionTime":"2025-11-28T09:58:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.702912 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.702955 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.702976 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.703005 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.703026 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:26Z","lastTransitionTime":"2025-11-28T09:58:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.805797 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.805923 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.806006 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.806042 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.806113 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:26Z","lastTransitionTime":"2025-11-28T09:58:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.908795 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.908841 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.908857 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.908880 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:26 crc kubenswrapper[4838]: I1128 09:58:26.908897 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:26Z","lastTransitionTime":"2025-11-28T09:58:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.011488 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.011536 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.011552 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.011576 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.011594 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:27Z","lastTransitionTime":"2025-11-28T09:58:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.114707 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.115105 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.115258 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.115418 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.115563 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:27Z","lastTransitionTime":"2025-11-28T09:58:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.218634 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.218697 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.218741 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.218767 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.218786 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:27Z","lastTransitionTime":"2025-11-28T09:58:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.322231 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.322296 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.322313 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.322336 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.322353 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:27Z","lastTransitionTime":"2025-11-28T09:58:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.425032 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.425078 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.425090 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.425106 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.425117 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:27Z","lastTransitionTime":"2025-11-28T09:58:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.527994 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.528055 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.528075 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.528099 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.528117 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:27Z","lastTransitionTime":"2025-11-28T09:58:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.561699 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.561709 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.561746 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:27 crc kubenswrapper[4838]: E1128 09:58:27.562073 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:27 crc kubenswrapper[4838]: E1128 09:58:27.562165 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:27 crc kubenswrapper[4838]: E1128 09:58:27.561908 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.632451 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.632506 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.632522 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.632545 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.632563 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:27Z","lastTransitionTime":"2025-11-28T09:58:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.735470 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.735521 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.735538 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.735560 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.735577 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:27Z","lastTransitionTime":"2025-11-28T09:58:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.838670 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.838804 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.838821 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.838846 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.838862 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:27Z","lastTransitionTime":"2025-11-28T09:58:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.993036 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.993105 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.993120 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.993143 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:27 crc kubenswrapper[4838]: I1128 09:58:27.993158 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:27Z","lastTransitionTime":"2025-11-28T09:58:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:28 crc kubenswrapper[4838]: E1128 09:58:28.005968 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.010459 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.010497 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.010508 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.010524 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.010533 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:28Z","lastTransitionTime":"2025-11-28T09:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:28 crc kubenswrapper[4838]: E1128 09:58:28.022729 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.026426 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.026484 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.026499 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.026519 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.026532 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:28Z","lastTransitionTime":"2025-11-28T09:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:28 crc kubenswrapper[4838]: E1128 09:58:28.039956 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.043319 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.043357 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.043390 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.043405 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.043418 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:28Z","lastTransitionTime":"2025-11-28T09:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:28 crc kubenswrapper[4838]: E1128 09:58:28.068015 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.073113 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.073172 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.073181 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.073195 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.073205 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:28Z","lastTransitionTime":"2025-11-28T09:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:28 crc kubenswrapper[4838]: E1128 09:58:28.098204 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: E1128 09:58:28.098334 4838 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.107115 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.107834 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.107862 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.107885 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.107898 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:28Z","lastTransitionTime":"2025-11-28T09:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.210678 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.210763 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.210776 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.210801 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.210814 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:28Z","lastTransitionTime":"2025-11-28T09:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.313832 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.313882 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.313892 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.313912 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.313922 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:28Z","lastTransitionTime":"2025-11-28T09:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.417051 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.417112 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.417129 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.417153 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.417171 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:28Z","lastTransitionTime":"2025-11-28T09:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.519849 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.519912 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.519935 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.519962 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.519982 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:28Z","lastTransitionTime":"2025-11-28T09:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.561889 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:28 crc kubenswrapper[4838]: E1128 09:58:28.562097 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.587165 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://658172db64e44d752eb06fe2788dda717db3fc2e672b073a2bdf159a16fd901f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8
cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\
\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.600409 4838 
status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.611000 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.622216 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.622254 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.622267 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.622283 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.622296 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:28Z","lastTransitionTime":"2025-11-28T09:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.623970 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.637512 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.653590 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.673336 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.687143 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de645de9-a5cd-4075-8bfd-402a619ea73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17e965aad7643d62c651c1e652be45bd914cfe3f14a0a6f43e4e4376b4cb7be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.704613 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.717008 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.724819 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:28 
crc kubenswrapper[4838]: I1128 09:58:28.724858 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.724870 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.724886 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.724897 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:28Z","lastTransitionTime":"2025-11-28T09:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.741499 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33add8cb97a3d547f8fbfa22a35902ceb3643378
62331a17551cab1a60de5dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53360b6257759a7c7274680307597ae7ca0e40e9966c24d50262c25b203a5177\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"message\\\":\\\"s/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 09:58:16.670401 6284 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1128 09:58:16.670903 6284 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 09:58:16.670954 6284 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 09:58:16.671581 6284 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 09:58:16.671659 6284 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 09:58:16.671666 6284 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 09:58:16.671699 6284 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 09:58:16.671739 6284 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 09:58:16.671750 6284 factory.go:656] Stopping watch factory\\\\nI1128 09:58:16.671769 6284 ovnkube.go:599] Stopped ovnkube\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33add8cb97a3d547f8fbfa22a35902ceb364337862331a17551cab1a60de5dae\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:22Z\\\",\\\"message\\\":\\\"9:58:21.338450 6507 services_controller.go:452] Built service openshift-machine-config-operator/machine-config-daemon per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:21.338487 6507 services_controller.go:453] Built service openshift-machine-config-operator/machine-config-daemon template LB for network=default: []services.LB{}\\\\nI1128 09:58:21.338500 6507 services_controller.go:454] Service openshift-machine-config-operator/machine-config-daemon for network=default has 2 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI1128 09:58:21.338503 6507 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-controllers\\\\\\\"}\\\\nI1128 09:58:21.338518 6507 services_controller.go:360] Finished syncing service machine-api-controllers on namespace openshift-machine-api for network=default : 2.046583ms\\\\nF1128 09:58:21.338532 6507 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler 
{0x1e60340 0x1e60020 0x1e5ffc0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\"
:[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.756341 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.775972 4838 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.795688 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPat
h\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.807640 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.816781 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.831945 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.832793 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.832834 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.832848 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.832866 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.832879 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:28Z","lastTransitionTime":"2025-11-28T09:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.842399 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a938006
6b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.935581 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.935633 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.935646 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.935665 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:28 crc kubenswrapper[4838]: I1128 09:58:28.935678 4838 setters.go:603] "Node became not ready" 
node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:28Z","lastTransitionTime":"2025-11-28T09:58:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.038398 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.038456 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.038482 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.038548 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.038572 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:29Z","lastTransitionTime":"2025-11-28T09:58:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.141116 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.141178 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.141194 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.141222 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.141239 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:29Z","lastTransitionTime":"2025-11-28T09:58:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.244313 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.244366 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.244383 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.244404 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.244422 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:29Z","lastTransitionTime":"2025-11-28T09:58:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.347066 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.347130 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.347147 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.347175 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.347195 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:29Z","lastTransitionTime":"2025-11-28T09:58:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.449914 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.449981 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.450004 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.450034 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.450058 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:29Z","lastTransitionTime":"2025-11-28T09:58:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.553014 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.554091 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.554666 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.554884 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.555058 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:29Z","lastTransitionTime":"2025-11-28T09:58:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.561378 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.561390 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:29 crc kubenswrapper[4838]: E1128 09:58:29.561579 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:29 crc kubenswrapper[4838]: E1128 09:58:29.561682 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.561409 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:29 crc kubenswrapper[4838]: E1128 09:58:29.561814 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.659846 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.659911 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.659934 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.659966 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.659996 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:29Z","lastTransitionTime":"2025-11-28T09:58:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.763567 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.763627 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.763645 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.763675 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.763696 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:29Z","lastTransitionTime":"2025-11-28T09:58:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.867057 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.867376 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.867510 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.867652 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.867829 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:29Z","lastTransitionTime":"2025-11-28T09:58:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.971147 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.971364 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.971585 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.971858 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:29 crc kubenswrapper[4838]: I1128 09:58:29.972103 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:29Z","lastTransitionTime":"2025-11-28T09:58:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.075582 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.075656 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.075680 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.075710 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.075857 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:30Z","lastTransitionTime":"2025-11-28T09:58:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.178962 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.179026 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.179045 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.179072 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.179089 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:30Z","lastTransitionTime":"2025-11-28T09:58:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.282026 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.282090 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.282103 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.282122 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.282137 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:30Z","lastTransitionTime":"2025-11-28T09:58:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.385332 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.385635 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.385769 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.385914 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.386008 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:30Z","lastTransitionTime":"2025-11-28T09:58:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.489890 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.490162 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.490236 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.490388 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.490481 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:30Z","lastTransitionTime":"2025-11-28T09:58:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.561373 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:30 crc kubenswrapper[4838]: E1128 09:58:30.561602 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.593115 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.593387 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.593479 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.593571 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.593744 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:30Z","lastTransitionTime":"2025-11-28T09:58:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.695964 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.696261 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.696351 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.696445 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.696534 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:30Z","lastTransitionTime":"2025-11-28T09:58:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.800016 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.800084 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.800103 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.800130 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.800149 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:30Z","lastTransitionTime":"2025-11-28T09:58:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.903326 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.903398 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.903428 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.903459 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:30 crc kubenswrapper[4838]: I1128 09:58:30.903478 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:30Z","lastTransitionTime":"2025-11-28T09:58:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.006876 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.006941 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.006963 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.006994 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.007018 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:31Z","lastTransitionTime":"2025-11-28T09:58:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.109705 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.109817 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.109855 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.109895 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.109922 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:31Z","lastTransitionTime":"2025-11-28T09:58:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.213878 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.213944 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.213963 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.213991 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.214009 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:31Z","lastTransitionTime":"2025-11-28T09:58:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.317437 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.317523 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.317556 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.317590 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.317611 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:31Z","lastTransitionTime":"2025-11-28T09:58:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.421798 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.421900 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.421917 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.421939 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.421955 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:31Z","lastTransitionTime":"2025-11-28T09:58:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.525792 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.525868 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.525891 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.525920 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.525937 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:31Z","lastTransitionTime":"2025-11-28T09:58:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.562240 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.562352 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:31 crc kubenswrapper[4838]: E1128 09:58:31.562459 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.562514 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:31 crc kubenswrapper[4838]: E1128 09:58:31.562649 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:31 crc kubenswrapper[4838]: E1128 09:58:31.562900 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.629455 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.629536 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.629560 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.629592 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.629610 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:31Z","lastTransitionTime":"2025-11-28T09:58:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.732249 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.732317 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.732336 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.732360 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.732379 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:31Z","lastTransitionTime":"2025-11-28T09:58:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.836040 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.836138 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.836158 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.836222 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.836243 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:31Z","lastTransitionTime":"2025-11-28T09:58:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.939469 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.939548 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.939567 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.939953 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:31 crc kubenswrapper[4838]: I1128 09:58:31.939993 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:31Z","lastTransitionTime":"2025-11-28T09:58:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.042795 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.042859 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.042878 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.042904 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.042920 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:32Z","lastTransitionTime":"2025-11-28T09:58:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.146532 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.146709 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.146770 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.146808 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.146832 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:32Z","lastTransitionTime":"2025-11-28T09:58:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.250247 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.250328 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.250352 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.250382 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.250404 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:32Z","lastTransitionTime":"2025-11-28T09:58:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.352885 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.352945 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.352967 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.352994 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.353016 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:32Z","lastTransitionTime":"2025-11-28T09:58:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.456677 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.456792 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.456811 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.456837 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.456858 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:32Z","lastTransitionTime":"2025-11-28T09:58:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.559404 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.559528 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.559543 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.559561 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.559571 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:32Z","lastTransitionTime":"2025-11-28T09:58:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.561644 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:32 crc kubenswrapper[4838]: E1128 09:58:32.561759 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.662117 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.662158 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.662174 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.662196 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.662213 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:32Z","lastTransitionTime":"2025-11-28T09:58:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.766017 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.766076 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.766096 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.766120 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.766139 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:32Z","lastTransitionTime":"2025-11-28T09:58:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.869019 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.869089 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.869112 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.869143 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.869168 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:32Z","lastTransitionTime":"2025-11-28T09:58:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.973094 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.973172 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.973198 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.973233 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:32 crc kubenswrapper[4838]: I1128 09:58:32.973262 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:32Z","lastTransitionTime":"2025-11-28T09:58:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.076480 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.076521 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.076529 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.076543 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.076552 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:33Z","lastTransitionTime":"2025-11-28T09:58:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.179327 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.179401 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.179417 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.179442 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.179460 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:33Z","lastTransitionTime":"2025-11-28T09:58:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.282909 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.282985 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.283003 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.283028 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.283046 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:33Z","lastTransitionTime":"2025-11-28T09:58:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.386411 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.386465 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.386478 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.386500 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.386514 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:33Z","lastTransitionTime":"2025-11-28T09:58:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.489972 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.490035 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.490052 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.490079 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.490098 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:33Z","lastTransitionTime":"2025-11-28T09:58:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.562443 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.562513 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.562582 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:33 crc kubenswrapper[4838]: E1128 09:58:33.562674 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:33 crc kubenswrapper[4838]: E1128 09:58:33.562904 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:33 crc kubenswrapper[4838]: E1128 09:58:33.563084 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.593647 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.593697 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.593777 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.593851 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.593901 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:33Z","lastTransitionTime":"2025-11-28T09:58:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.697116 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.697167 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.697175 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.697191 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.697200 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:33Z","lastTransitionTime":"2025-11-28T09:58:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.799976 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.800059 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.800077 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.800103 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.800121 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:33Z","lastTransitionTime":"2025-11-28T09:58:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.903411 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.903504 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.903532 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.903563 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:33 crc kubenswrapper[4838]: I1128 09:58:33.903587 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:33Z","lastTransitionTime":"2025-11-28T09:58:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.006925 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.006984 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.007002 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.007032 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.007052 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:34Z","lastTransitionTime":"2025-11-28T09:58:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.110613 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.110763 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.110782 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.110806 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.110823 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:34Z","lastTransitionTime":"2025-11-28T09:58:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.213969 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.214033 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.214049 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.214075 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.214092 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:34Z","lastTransitionTime":"2025-11-28T09:58:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.317807 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.317879 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.317904 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.317930 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.317952 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:34Z","lastTransitionTime":"2025-11-28T09:58:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.421087 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.421128 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.421139 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.421154 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.421165 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:34Z","lastTransitionTime":"2025-11-28T09:58:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.524581 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.524645 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.524662 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.524685 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.524704 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:34Z","lastTransitionTime":"2025-11-28T09:58:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.562222 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:34 crc kubenswrapper[4838]: E1128 09:58:34.562453 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.627205 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.627281 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.627304 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.627334 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.627358 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:34Z","lastTransitionTime":"2025-11-28T09:58:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.731283 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.731343 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.731363 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.731390 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.731409 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:34Z","lastTransitionTime":"2025-11-28T09:58:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.834136 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.834179 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.834195 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.834217 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.834234 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:34Z","lastTransitionTime":"2025-11-28T09:58:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.937699 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.937806 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.937829 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.937858 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:34 crc kubenswrapper[4838]: I1128 09:58:34.937878 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:34Z","lastTransitionTime":"2025-11-28T09:58:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.040789 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.040839 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.040856 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.040879 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.040897 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:35Z","lastTransitionTime":"2025-11-28T09:58:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.150284 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.150616 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.150883 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.151073 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.151220 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:35Z","lastTransitionTime":"2025-11-28T09:58:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.254628 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.254688 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.254705 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.254755 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.254772 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:35Z","lastTransitionTime":"2025-11-28T09:58:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.358015 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.358087 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.358112 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.358141 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.358164 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:35Z","lastTransitionTime":"2025-11-28T09:58:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.461645 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.461698 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.461750 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.461775 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.461793 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:35Z","lastTransitionTime":"2025-11-28T09:58:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.562128 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.562161 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.562279 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:35 crc kubenswrapper[4838]: E1128 09:58:35.562448 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:35 crc kubenswrapper[4838]: E1128 09:58:35.562670 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:35 crc kubenswrapper[4838]: E1128 09:58:35.562793 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.564435 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.564531 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.564557 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.564586 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.564613 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:35Z","lastTransitionTime":"2025-11-28T09:58:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.668389 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.668483 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.668502 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.668551 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.668568 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:35Z","lastTransitionTime":"2025-11-28T09:58:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.771646 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.771742 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.771763 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.771790 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.771809 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:35Z","lastTransitionTime":"2025-11-28T09:58:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.874787 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.874851 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.874875 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.874904 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.874927 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:35Z","lastTransitionTime":"2025-11-28T09:58:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.979152 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.979202 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.979241 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.979267 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:35 crc kubenswrapper[4838]: I1128 09:58:35.979281 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:35Z","lastTransitionTime":"2025-11-28T09:58:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.083111 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.083171 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.083184 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.083203 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.083216 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:36Z","lastTransitionTime":"2025-11-28T09:58:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.186152 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.186216 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.186233 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.186263 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.186285 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:36Z","lastTransitionTime":"2025-11-28T09:58:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.289220 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.289276 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.289314 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.289336 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.289349 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:36Z","lastTransitionTime":"2025-11-28T09:58:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.391396 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.391438 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.391448 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.391465 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.391505 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:36Z","lastTransitionTime":"2025-11-28T09:58:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.495470 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.495556 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.495574 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.495599 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.495616 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:36Z","lastTransitionTime":"2025-11-28T09:58:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.561588 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.562692 4838 scope.go:117] "RemoveContainer" containerID="33add8cb97a3d547f8fbfa22a35902ceb364337862331a17551cab1a60de5dae" Nov 28 09:58:36 crc kubenswrapper[4838]: E1128 09:58:36.562282 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.583202 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:36Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.603256 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:36Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.610861 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.610903 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.610915 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.610933 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.610946 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:36Z","lastTransitionTime":"2025-11-28T09:58:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.620934 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":t
rue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:36Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.638009 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de645de9-a5cd-4075-8bfd-402a619ea73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17e965aad7643d62c651c1e652be45bd914cfe3f14a0a6f43e4e4376b4cb7be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:36Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.661859 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 
4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:36Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.680011 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:36Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.710971 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33add8cb97a3d547f8fbfa22a35902ceb3643378
62331a17551cab1a60de5dae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33add8cb97a3d547f8fbfa22a35902ceb364337862331a17551cab1a60de5dae\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:22Z\\\",\\\"message\\\":\\\"9:58:21.338450 6507 services_controller.go:452] Built service openshift-machine-config-operator/machine-config-daemon per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:21.338487 6507 services_controller.go:453] Built service openshift-machine-config-operator/machine-config-daemon template LB for network=default: []services.LB{}\\\\nI1128 09:58:21.338500 6507 services_controller.go:454] Service openshift-machine-config-operator/machine-config-daemon for network=default has 2 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI1128 09:58:21.338503 6507 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-controllers\\\\\\\"}\\\\nI1128 09:58:21.338518 6507 services_controller.go:360] Finished syncing service machine-api-controllers on namespace openshift-machine-api for network=default : 2.046583ms\\\\nF1128 09:58:21.338532 6507 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 0x1e60020 0x1e5ffc0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gmhsj_openshift-ovn-kubernetes(41b01f7d-5c75-49de-86f7-87e04bf71194)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:36Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.714389 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.714487 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.714505 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.714552 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.714566 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:36Z","lastTransitionTime":"2025-11-28T09:58:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.730574 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:36Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.753014 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:36Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.768381 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:36Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.788133 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:36Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.803183 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:36Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.816961 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.817007 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.817021 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.817040 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.817053 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:36Z","lastTransitionTime":"2025-11-28T09:58:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.822150 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a938006
6b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:36Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.841215 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:36Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.856365 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://658172db64e44d752eb06fe2788dda717db3fc2e672b073a2bdf159a16fd901f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:36Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.866496 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:36Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.875015 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:36Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.887607 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:36Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.918175 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs\") pod \"network-metrics-daemon-p69l6\" (UID: \"2a223cc8-af33-4e83-8bfc-2676c5700447\") " pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:36 crc kubenswrapper[4838]: E1128 09:58:36.918373 4838 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 09:58:36 crc kubenswrapper[4838]: E1128 09:58:36.918493 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs podName:2a223cc8-af33-4e83-8bfc-2676c5700447 nodeName:}" failed. No retries permitted until 2025-11-28 09:59:08.91846378 +0000 UTC m=+120.617438010 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs") pod "network-metrics-daemon-p69l6" (UID: "2a223cc8-af33-4e83-8bfc-2676c5700447") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.919526 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.919567 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.919576 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.919594 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:36 crc kubenswrapper[4838]: I1128 09:58:36.919606 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:36Z","lastTransitionTime":"2025-11-28T09:58:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.022445 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.022480 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.022488 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.022502 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.022514 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:37Z","lastTransitionTime":"2025-11-28T09:58:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.125082 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.125122 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.125131 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.125160 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.125171 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:37Z","lastTransitionTime":"2025-11-28T09:58:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.154554 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gmhsj_41b01f7d-5c75-49de-86f7-87e04bf71194/ovnkube-controller/1.log" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.157986 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerStarted","Data":"973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb"} Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.158512 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.175487 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:37Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.192125 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPat
h\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:37Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.208897 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:37Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.227535 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.227567 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.227576 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.227590 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.227600 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:37Z","lastTransitionTime":"2025-11-28T09:58:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.232097 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33add8cb97a3d547f8fbfa22a35902ceb364337862331a17551cab1a60de5dae\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:22Z\\\",\\\"message\\\":\\\"9:58:21.338450 6507 services_controller.go:452] Built service openshift-machine-config-operator/machine-config-daemon per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:21.338487 6507 services_controller.go:453] Built service openshift-machine-config-operator/machine-config-daemon template LB for network=default: []services.LB{}\\\\nI1128 09:58:21.338500 6507 services_controller.go:454] Service openshift-machine-config-operator/machine-config-daemon for network=default has 2 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI1128 09:58:21.338503 6507 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-controllers\\\\\\\"}\\\\nI1128 09:58:21.338518 6507 services_controller.go:360] Finished syncing service machine-api-controllers on namespace openshift-machine-api for network=default : 2.046583ms\\\\nF1128 09:58:21.338532 6507 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 0x1e60020 0x1e5ffc0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:37Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.241565 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:37Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.252103 4838 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:37Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.263470 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:37Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.274241 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:37Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.283735 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:37Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.294554 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:37Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.305475 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:37Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.320455 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://658172db64e44d752eb06fe2788dda717db3fc2e672b073a2bdf159a16fd901f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:37Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.329972 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.330014 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:37 crc 
kubenswrapper[4838]: I1128 09:58:37.330027 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.330048 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.330061 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:37Z","lastTransitionTime":"2025-11-28T09:58:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.348028 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:37Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.363446 4838 
status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:37Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.374033 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de645de9-a5cd-4075-8bfd-402a619ea73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17e965aad7643d62c651c1e652be45bd914cfe3f14a0a6f43e4e4376b4cb7be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:37Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.384704 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:37Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.399001 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:37Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.408128 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:37Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.431773 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.431803 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.431815 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.431831 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.431842 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:37Z","lastTransitionTime":"2025-11-28T09:58:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.534409 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.534440 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.534450 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.534463 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.534475 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:37Z","lastTransitionTime":"2025-11-28T09:58:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.561111 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.561115 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:37 crc kubenswrapper[4838]: E1128 09:58:37.561274 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.561132 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:37 crc kubenswrapper[4838]: E1128 09:58:37.561428 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:37 crc kubenswrapper[4838]: E1128 09:58:37.561492 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.637013 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.637108 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.637122 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.637146 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.637162 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:37Z","lastTransitionTime":"2025-11-28T09:58:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.740694 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.740807 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.740828 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.740855 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.740872 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:37Z","lastTransitionTime":"2025-11-28T09:58:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.845710 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.845856 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.845882 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.845922 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.845958 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:37Z","lastTransitionTime":"2025-11-28T09:58:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.949388 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.949452 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.949473 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.949500 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:37 crc kubenswrapper[4838]: I1128 09:58:37.949519 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:37Z","lastTransitionTime":"2025-11-28T09:58:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.051768 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.051834 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.051855 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.051883 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.051904 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:38Z","lastTransitionTime":"2025-11-28T09:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.154458 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.154495 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.154506 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.154526 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.154539 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:38Z","lastTransitionTime":"2025-11-28T09:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.164304 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gmhsj_41b01f7d-5c75-49de-86f7-87e04bf71194/ovnkube-controller/2.log" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.165266 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gmhsj_41b01f7d-5c75-49de-86f7-87e04bf71194/ovnkube-controller/1.log" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.168931 4838 generic.go:334] "Generic (PLEG): container finished" podID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerID="973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb" exitCode=1 Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.169006 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerDied","Data":"973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb"} Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.169046 4838 scope.go:117] "RemoveContainer" containerID="33add8cb97a3d547f8fbfa22a35902ceb364337862331a17551cab1a60de5dae" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.169763 4838 scope.go:117] "RemoveContainer" containerID="973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb" Nov 28 09:58:38 crc kubenswrapper[4838]: E1128 09:58:38.169989 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-gmhsj_openshift-ovn-kubernetes(41b01f7d-5c75-49de-86f7-87e04bf71194)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.184488 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.202797 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.220179 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://658172db64e44d752eb06fe2788dda717db3fc2e672b073a2bdf159a16fd901f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.235284 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.249966 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.259324 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.259357 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.259369 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.259386 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.259398 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:38Z","lastTransitionTime":"2025-11-28T09:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.264400 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de645de9-a5cd-4075-8bfd-402a619ea73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17e965aad7643d62c651c1e652be45bd914cfe3f14a0a6f43e4e4376b4cb7be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.282926 4838 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.303850 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.321297 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.344108 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.353528 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.353592 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.353609 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.353637 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.353656 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:38Z","lastTransitionTime":"2025-11-28T09:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.369645 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:5
7:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: E1128 09:58:38.374790 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.380868 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.380995 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.381020 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.381045 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.381066 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:38Z","lastTransitionTime":"2025-11-28T09:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.389211 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: E1128 09:58:38.404847 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e
14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.410362 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.410407 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.410423 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.410446 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.410462 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:38Z","lastTransitionTime":"2025-11-28T09:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.420867 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://973017e2b3a339f98d3439e256810a83b425061d
827e74edd127aee640a9c0eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33add8cb97a3d547f8fbfa22a35902ceb364337862331a17551cab1a60de5dae\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:22Z\\\",\\\"message\\\":\\\"9:58:21.338450 6507 services_controller.go:452] Built service openshift-machine-config-operator/machine-config-daemon per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:21.338487 6507 services_controller.go:453] Built service openshift-machine-config-operator/machine-config-daemon template LB for network=default: []services.LB{}\\\\nI1128 09:58:21.338500 6507 services_controller.go:454] Service openshift-machine-config-operator/machine-config-daemon for network=default has 2 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI1128 09:58:21.338503 6507 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-controllers\\\\\\\"}\\\\nI1128 09:58:21.338518 6507 services_controller.go:360] Finished syncing service machine-api-controllers on namespace openshift-machine-api for network=default : 2.046583ms\\\\nF1128 09:58:21.338532 6507 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 0x1e60020 0x1e5ffc0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"l\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 09:58:37.612073 6708 services_controller.go:452] Built service openshift-console/downloads per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612094 6708 services_controller.go:452] Built service openshift-machine-api/machine-api-operator-machine-webhook per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612103 6708 services_controller.go:453] Built service openshift-console/downloads template LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612134 6708 services_controller.go:453] Built service openshift-machine-api/machine-api-operator-machine-webhook template LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612147 6708 services_controller.go:454] Service openshift-console/downloads for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nF1128 09:58:37.612098 6708 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add 
Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node netwo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initCo
ntainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: E1128 09:58:38.431300 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.435985 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.436162 4838 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.436204 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.436224 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.436248 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.436265 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:38Z","lastTransitionTime":"2025-11-28T09:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.453544 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\"
:\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: E1128 09:58:38.453514 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.457656 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.457690 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.457701 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.457747 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.457760 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:38Z","lastTransitionTime":"2025-11-28T09:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.475333 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: E1128 09:58:38.479606 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: E1128 09:58:38.479906 4838 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.482233 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.482285 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.482302 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.482330 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.482348 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:38Z","lastTransitionTime":"2025-11-28T09:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.493194 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.509354 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.561540 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:38 crc kubenswrapper[4838]: E1128 09:58:38.561701 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.586016 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.585930 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tr
ue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.586065 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.586277 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.586303 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.586318 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:38Z","lastTransitionTime":"2025-11-28T09:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.611642 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.632247 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPat
h\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.653741 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.672396 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://973017e2b3a339f98d3439e256810a83b425061d
827e74edd127aee640a9c0eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33add8cb97a3d547f8fbfa22a35902ceb364337862331a17551cab1a60de5dae\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:22Z\\\",\\\"message\\\":\\\"9:58:21.338450 6507 services_controller.go:452] Built service openshift-machine-config-operator/machine-config-daemon per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:21.338487 6507 services_controller.go:453] Built service openshift-machine-config-operator/machine-config-daemon template LB for network=default: []services.LB{}\\\\nI1128 09:58:21.338500 6507 services_controller.go:454] Service openshift-machine-config-operator/machine-config-daemon for network=default has 2 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI1128 09:58:21.338503 6507 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-controllers\\\\\\\"}\\\\nI1128 09:58:21.338518 6507 services_controller.go:360] Finished syncing service machine-api-controllers on namespace openshift-machine-api for network=default : 2.046583ms\\\\nF1128 09:58:21.338532 6507 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: failed to add event handler: handler {0x1e60340 0x1e60020 0x1e5ffc0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"l\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 09:58:37.612073 6708 services_controller.go:452] Built service openshift-console/downloads per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612094 6708 services_controller.go:452] Built service openshift-machine-api/machine-api-operator-machine-webhook per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612103 6708 services_controller.go:453] Built service openshift-console/downloads template LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612134 6708 services_controller.go:453] Built service openshift-machine-api/machine-api-operator-machine-webhook template LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612147 6708 services_controller.go:454] Service openshift-console/downloads for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nF1128 09:58:37.612098 6708 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add 
Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node netwo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initCo
ntainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.684701 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.689375 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.689434 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.689454 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.689479 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.689498 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:38Z","lastTransitionTime":"2025-11-28T09:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.696677 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kuber
netes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.708338 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.720257 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.733194 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.753418 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.775263 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.787387 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://658172db64e44d752eb06fe2788dda717db3fc2e672b073a2bdf159a16fd901f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.791307 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.791346 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:38 crc 
kubenswrapper[4838]: I1128 09:58:38.791356 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.791370 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.791381 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:38Z","lastTransitionTime":"2025-11-28T09:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.795861 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.804505 4838 
status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de645de9-a5cd-4075-8bfd-402a619ea73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17e965aad7643d62c651c1e652be45bd914cfe3f14a0a6f43e4e4376b4cb7be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.817593 4838 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.829927 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.838886 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:38Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.892966 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.893003 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.893012 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.893026 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.893035 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:38Z","lastTransitionTime":"2025-11-28T09:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.995957 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.996023 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.996040 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.996064 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:38 crc kubenswrapper[4838]: I1128 09:58:38.996079 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:38Z","lastTransitionTime":"2025-11-28T09:58:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.099899 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.099988 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.100017 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.100049 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.100071 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:39Z","lastTransitionTime":"2025-11-28T09:58:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.173662 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gmhsj_41b01f7d-5c75-49de-86f7-87e04bf71194/ovnkube-controller/2.log" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.177668 4838 scope.go:117] "RemoveContainer" containerID="973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb" Nov 28 09:58:39 crc kubenswrapper[4838]: E1128 09:58:39.177958 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-gmhsj_openshift-ovn-kubernetes(41b01f7d-5c75-49de-86f7-87e04bf71194)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.198194 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://658172db64e44d752eb06fe2788dda717db3fc2e672b073a2bdf159a16fd901f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:39Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.202474 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.202542 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:39 crc 
kubenswrapper[4838]: I1128 09:58:39.202565 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.202593 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.202616 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:39Z","lastTransitionTime":"2025-11-28T09:58:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.211838 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:39Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.224305 4838 
status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:39Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.238145 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:39Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.255194 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:39Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.273398 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:39Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.288180 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:39Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.300643 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de645de9-a5cd-4075-8bfd-402a619ea73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17e965aad7643d62c651c1e652be45bd914cfe3f14a0a6f43e4e4376b4cb7be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:39Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.306294 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.306336 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.306352 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.306376 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.306393 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:39Z","lastTransitionTime":"2025-11-28T09:58:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.314486 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:39Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.330276 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:39Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.351734 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\
"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started
\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"l\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 09:58:37.612073 6708 services_controller.go:452] Built service openshift-console/downloads per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612094 6708 services_controller.go:452] Built service openshift-machine-api/machine-api-operator-machine-webhook per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612103 6708 services_controller.go:453] Built service openshift-console/downloads template LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612134 6708 services_controller.go:453] Built service openshift-machine-api/machine-api-operator-machine-webhook template LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612147 6708 services_controller.go:454] Service openshift-console/downloads for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nF1128 09:58:37.612098 6708 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node netwo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gmhsj_openshift-ovn-kubernetes(41b01f7d-5c75-49de-86f7-87e04bf71194)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:39Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.363413 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:39Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.382350 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start 
--config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be0
4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:39Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.402409 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file 
\\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for 
RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\"
:\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:39Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.412207 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.412265 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.412283 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.412310 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.412335 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:39Z","lastTransitionTime":"2025-11-28T09:58:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.420586 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:39Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.435299 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:39Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.446195 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:39Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.461165 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"
},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:39Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.515574 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.515622 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.515640 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.515666 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.515683 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:39Z","lastTransitionTime":"2025-11-28T09:58:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.562108 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.562165 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.562115 4838 util.go:30] "No sandbox for pod can be found. 
Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.562115 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 09:58:39 crc kubenswrapper[4838]: E1128 09:58:39.562328 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 09:58:39 crc kubenswrapper[4838]: E1128 09:58:39.562415 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 09:58:39 crc kubenswrapper[4838]: E1128 09:58:39.562549 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.617948 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.618022 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.618041 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.618065 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.618085 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:39Z","lastTransitionTime":"2025-11-28T09:58:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.720817 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.720883 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.720902 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.720924 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.720941 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:39Z","lastTransitionTime":"2025-11-28T09:58:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.823932 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.823994 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.824011 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.824036 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.824054 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:39Z","lastTransitionTime":"2025-11-28T09:58:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.926999 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.927047 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.927058 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.927079 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:39 crc kubenswrapper[4838]: I1128 09:58:39.927091 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:39Z","lastTransitionTime":"2025-11-28T09:58:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.029885 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.029942 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.029958 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.029982 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.030001 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:40Z","lastTransitionTime":"2025-11-28T09:58:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.133022 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.133062 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.133076 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.133094 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.133104 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:40Z","lastTransitionTime":"2025-11-28T09:58:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.235481 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.235534 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.235551 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.235575 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.235592 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:40Z","lastTransitionTime":"2025-11-28T09:58:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.338083 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.338142 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.338160 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.338184 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.338201 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:40Z","lastTransitionTime":"2025-11-28T09:58:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.441447 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.441507 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.441530 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.441560 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.441581 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:40Z","lastTransitionTime":"2025-11-28T09:58:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.545906 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.545957 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.545973 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.545997 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.546016 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:40Z","lastTransitionTime":"2025-11-28T09:58:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.561675 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6"
Nov 28 09:58:40 crc kubenswrapper[4838]: E1128 09:58:40.561943 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.648146 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.648212 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.648239 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.648268 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.648290 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:40Z","lastTransitionTime":"2025-11-28T09:58:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.751843 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.751928 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.751951 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.751980 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.751999 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:40Z","lastTransitionTime":"2025-11-28T09:58:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.855089 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.855178 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.855191 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.855209 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.855221 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:40Z","lastTransitionTime":"2025-11-28T09:58:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.958355 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.958429 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.958450 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.958479 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:40 crc kubenswrapper[4838]: I1128 09:58:40.958495 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:40Z","lastTransitionTime":"2025-11-28T09:58:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.061484 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.061537 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.061552 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.061574 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.061586 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:41Z","lastTransitionTime":"2025-11-28T09:58:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.174969 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.175006 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.175018 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.175036 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.175051 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:41Z","lastTransitionTime":"2025-11-28T09:58:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.278014 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.278107 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.278119 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.278136 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.278151 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:41Z","lastTransitionTime":"2025-11-28T09:58:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.388364 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.388409 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.388421 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.388467 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.388485 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:41Z","lastTransitionTime":"2025-11-28T09:58:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.491632 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.491708 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.491769 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.491797 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.491814 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:41Z","lastTransitionTime":"2025-11-28T09:58:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.561171 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.561233 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.561204 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 09:58:41 crc kubenswrapper[4838]: E1128 09:58:41.561407 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 09:58:41 crc kubenswrapper[4838]: E1128 09:58:41.561553 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 09:58:41 crc kubenswrapper[4838]: E1128 09:58:41.561688 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.596042 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.596109 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.596121 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.596142 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.596159 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:41Z","lastTransitionTime":"2025-11-28T09:58:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.700446 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.700501 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.700510 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.700531 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.700543 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:41Z","lastTransitionTime":"2025-11-28T09:58:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.809396 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.809454 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.809468 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.809491 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.809505 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:41Z","lastTransitionTime":"2025-11-28T09:58:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.912914 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.912985 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.913002 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.913029 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:41 crc kubenswrapper[4838]: I1128 09:58:41.913048 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:41Z","lastTransitionTime":"2025-11-28T09:58:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.016290 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.017004 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.017050 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.017082 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.017101 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:42Z","lastTransitionTime":"2025-11-28T09:58:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.120177 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.120920 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.120957 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.120980 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.120993 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:42Z","lastTransitionTime":"2025-11-28T09:58:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.223287 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.223340 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.223353 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.223372 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.223384 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:42Z","lastTransitionTime":"2025-11-28T09:58:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.325816 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.326225 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.326351 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.326474 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.326588 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:42Z","lastTransitionTime":"2025-11-28T09:58:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.429145 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.429215 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.429239 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.429267 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.429284 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:42Z","lastTransitionTime":"2025-11-28T09:58:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.531820 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.532102 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.532258 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.532406 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.532527 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:42Z","lastTransitionTime":"2025-11-28T09:58:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.561329 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6"
Nov 28 09:58:42 crc kubenswrapper[4838]: E1128 09:58:42.561533 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.636128 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.636192 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.636207 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.636265 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.636279 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:42Z","lastTransitionTime":"2025-11-28T09:58:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.739530 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.739590 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.739607 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.739631 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Has your network provider started?"} Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.842218 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.842299 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.842323 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.842354 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.842376 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:42Z","lastTransitionTime":"2025-11-28T09:58:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.973216 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.973274 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.973291 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.973317 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:42 crc kubenswrapper[4838]: I1128 09:58:42.973334 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:42Z","lastTransitionTime":"2025-11-28T09:58:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.076430 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.076491 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.076512 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.076540 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.076560 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:43Z","lastTransitionTime":"2025-11-28T09:58:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.179803 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.180275 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.180440 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.180595 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.180780 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:43Z","lastTransitionTime":"2025-11-28T09:58:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.283776 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.283834 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.283853 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.283876 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.283893 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:43Z","lastTransitionTime":"2025-11-28T09:58:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.387188 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.387226 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.387236 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.387251 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.387265 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:43Z","lastTransitionTime":"2025-11-28T09:58:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.490894 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.490948 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.490965 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.490988 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.491005 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:43Z","lastTransitionTime":"2025-11-28T09:58:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.561979 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.562000 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:43 crc kubenswrapper[4838]: E1128 09:58:43.562219 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.562000 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:43 crc kubenswrapper[4838]: E1128 09:58:43.562324 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:43 crc kubenswrapper[4838]: E1128 09:58:43.562408 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.613076 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.613140 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.613163 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.613192 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.613213 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:43Z","lastTransitionTime":"2025-11-28T09:58:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.716471 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.716524 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.716541 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.716561 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.716576 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:43Z","lastTransitionTime":"2025-11-28T09:58:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.820575 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.820639 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.820662 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.820694 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.820762 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:43Z","lastTransitionTime":"2025-11-28T09:58:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.923448 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.923484 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.923493 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.923508 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:43 crc kubenswrapper[4838]: I1128 09:58:43.923519 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:43Z","lastTransitionTime":"2025-11-28T09:58:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.026334 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.026398 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.026415 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.026440 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.026457 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:44Z","lastTransitionTime":"2025-11-28T09:58:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.129517 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.129574 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.129592 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.129614 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.129630 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:44Z","lastTransitionTime":"2025-11-28T09:58:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.232800 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.232849 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.232866 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.232890 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.232907 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:44Z","lastTransitionTime":"2025-11-28T09:58:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.336229 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.336378 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.336411 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.336444 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.336472 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:44Z","lastTransitionTime":"2025-11-28T09:58:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.444194 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.444274 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.444299 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.444329 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.444354 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:44Z","lastTransitionTime":"2025-11-28T09:58:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.547381 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.547437 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.547453 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.547476 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.547494 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:44Z","lastTransitionTime":"2025-11-28T09:58:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.562135 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:44 crc kubenswrapper[4838]: E1128 09:58:44.562313 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.651111 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.651200 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.651223 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.651255 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.651277 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:44Z","lastTransitionTime":"2025-11-28T09:58:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.753902 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.753951 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.753969 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.753994 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.754011 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:44Z","lastTransitionTime":"2025-11-28T09:58:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.857269 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.857354 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.857377 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.857406 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.857428 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:44Z","lastTransitionTime":"2025-11-28T09:58:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.960591 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.960643 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.960659 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.960681 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:44 crc kubenswrapper[4838]: I1128 09:58:44.960699 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:44Z","lastTransitionTime":"2025-11-28T09:58:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.063740 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.063789 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.063801 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.063843 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.063855 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:45Z","lastTransitionTime":"2025-11-28T09:58:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.166372 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.166417 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.166428 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.166444 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.166455 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:45Z","lastTransitionTime":"2025-11-28T09:58:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.268490 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.268543 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.268553 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.268569 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.268579 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:45Z","lastTransitionTime":"2025-11-28T09:58:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.371501 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.371554 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.371570 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.371591 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.371607 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:45Z","lastTransitionTime":"2025-11-28T09:58:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.474079 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.474123 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.474132 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.474147 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.474157 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:45Z","lastTransitionTime":"2025-11-28T09:58:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.561410 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.561517 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:45 crc kubenswrapper[4838]: E1128 09:58:45.561558 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.561632 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:45 crc kubenswrapper[4838]: E1128 09:58:45.561708 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:45 crc kubenswrapper[4838]: E1128 09:58:45.561886 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.578043 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.578171 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.578194 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.578218 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.578235 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:45Z","lastTransitionTime":"2025-11-28T09:58:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.681304 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.681367 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.681389 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.681470 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.681541 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:45Z","lastTransitionTime":"2025-11-28T09:58:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.783995 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.784031 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.784040 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.784052 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.784061 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:45Z","lastTransitionTime":"2025-11-28T09:58:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.886906 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.886981 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.887008 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.887041 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.887064 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:45Z","lastTransitionTime":"2025-11-28T09:58:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.990046 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.990103 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.990120 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.990142 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:45 crc kubenswrapper[4838]: I1128 09:58:45.990159 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:45Z","lastTransitionTime":"2025-11-28T09:58:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.092679 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.092768 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.092791 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.092823 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.092846 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:46Z","lastTransitionTime":"2025-11-28T09:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.196205 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.196273 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.196292 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.196317 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.196334 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:46Z","lastTransitionTime":"2025-11-28T09:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.298405 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.298457 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.298470 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.298489 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.298500 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:46Z","lastTransitionTime":"2025-11-28T09:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.400941 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.400981 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.401021 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.401039 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.401057 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:46Z","lastTransitionTime":"2025-11-28T09:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.503090 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.503147 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.503164 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.503188 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.503206 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:46Z","lastTransitionTime":"2025-11-28T09:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.561084 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:46 crc kubenswrapper[4838]: E1128 09:58:46.561231 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.605709 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.605793 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.605809 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.605829 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.605846 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:46Z","lastTransitionTime":"2025-11-28T09:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.709402 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.709503 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.709524 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.709548 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.709564 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:46Z","lastTransitionTime":"2025-11-28T09:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.812843 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.812914 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.812936 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.812963 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.812981 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:46Z","lastTransitionTime":"2025-11-28T09:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.916264 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.916369 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.916394 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.916433 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:46 crc kubenswrapper[4838]: I1128 09:58:46.916454 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:46Z","lastTransitionTime":"2025-11-28T09:58:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.019665 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.019758 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.019777 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.019803 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.019825 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:47Z","lastTransitionTime":"2025-11-28T09:58:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.124508 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.124568 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.124584 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.124605 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.124618 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:47Z","lastTransitionTime":"2025-11-28T09:58:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.228366 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.228454 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.228474 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.228504 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.228522 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:47Z","lastTransitionTime":"2025-11-28T09:58:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.331595 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.331642 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.331654 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.331680 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.331698 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:47Z","lastTransitionTime":"2025-11-28T09:58:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.435349 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.435451 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.435462 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.435479 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.435491 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:47Z","lastTransitionTime":"2025-11-28T09:58:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.538377 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.538413 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.538424 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.538441 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.538452 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:47Z","lastTransitionTime":"2025-11-28T09:58:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.561375 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.561403 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.561436 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:47 crc kubenswrapper[4838]: E1128 09:58:47.561514 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:47 crc kubenswrapper[4838]: E1128 09:58:47.561627 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:47 crc kubenswrapper[4838]: E1128 09:58:47.561691 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.641400 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.641954 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.642058 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.642163 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.642254 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:47Z","lastTransitionTime":"2025-11-28T09:58:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.745032 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.745274 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.745504 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.745774 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.745988 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:47Z","lastTransitionTime":"2025-11-28T09:58:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.848813 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.849143 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.849254 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.849433 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.849528 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:47Z","lastTransitionTime":"2025-11-28T09:58:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.953612 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.953995 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.954202 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.954421 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:47 crc kubenswrapper[4838]: I1128 09:58:47.954634 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:47Z","lastTransitionTime":"2025-11-28T09:58:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.058026 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.058088 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.058104 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.058122 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.058167 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:48Z","lastTransitionTime":"2025-11-28T09:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.162237 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.162287 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.162298 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.162315 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.162328 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:48Z","lastTransitionTime":"2025-11-28T09:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.264359 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.264433 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.264451 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.264476 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.264494 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:48Z","lastTransitionTime":"2025-11-28T09:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.366940 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.366976 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.366984 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.366998 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.367006 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:48Z","lastTransitionTime":"2025-11-28T09:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.469323 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.469364 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.469375 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.469394 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.469406 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:48Z","lastTransitionTime":"2025-11-28T09:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.561762 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:48 crc kubenswrapper[4838]: E1128 09:58:48.561921 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.575104 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.575175 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.575192 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.575245 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.575267 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:48Z","lastTransitionTime":"2025-11-28T09:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.577350 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de645de9-a5cd-4075-8bfd-402a619ea73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17e965aad7643d62c651c1e652be45bd914cfe3f14a0a6f43e4e4376b4cb7be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.592136 4838 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.608942 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.635348 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.638115 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.638149 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.638161 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.638178 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.638189 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:48Z","lastTransitionTime":"2025-11-28T09:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.655004 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport 
= 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":
\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:48 crc kubenswrapper[4838]: E1128 09:58:48.657745 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.661970 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.662010 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.662022 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.662044 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.662058 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:48Z","lastTransitionTime":"2025-11-28T09:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.681868 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPat
h\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:48 crc kubenswrapper[4838]: E1128 09:58:48.683430 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.688613 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.688651 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.688668 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.688691 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.688708 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:48Z","lastTransitionTime":"2025-11-28T09:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.704668 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:48 crc kubenswrapper[4838]: E1128 09:58:48.727538 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e
14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z"
Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.731509 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.731548 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.731560 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.731578 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.731592 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:48Z","lastTransitionTime":"2025-11-28T09:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:48 crc kubenswrapper[4838]: E1128 09:58:48.750483 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{... duplicate of the node-status patch payload logged at 09:58:48.727538 above, elided ...}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z"
Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.754710 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"l\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 09:58:37.612073 6708 services_controller.go:452] Built service openshift-console/downloads per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612094 6708 services_controller.go:452] Built service openshift-machine-api/machine-api-operator-machine-webhook per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612103 6708 services_controller.go:453] Built service openshift-console/downloads template LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612134 6708 services_controller.go:453] Built service openshift-machine-api/machine-api-operator-machine-webhook template LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612147 6708 services_controller.go:454] Service openshift-console/downloads for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nF1128 09:58:37.612098 6708 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node netwo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gmhsj_openshift-ovn-kubernetes(41b01f7d-5c75-49de-86f7-87e04bf71194)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.755795 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.755834 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.755842 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.755859 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.755868 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:48Z","lastTransitionTime":"2025-11-28T09:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 09:58:48 crc kubenswrapper[4838]: E1128 09:58:48.772529 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{... duplicate of the node-status patch payload logged at 09:58:48.727538 above, elided ...}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z"
Nov 28 09:58:48 crc kubenswrapper[4838]: E1128 09:58:48.772657 4838 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.774060 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.774093 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.774101 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.774115 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.774124 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:48Z","lastTransitionTime":"2025-11-28T09:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.777219 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea1
77225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.790927 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,
\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.801536 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.812825 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.825379 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.837735 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.848893 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.868461 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://658172db64e44d752eb06fe2788dda717db3fc2e672b073a2bdf159a16fd901f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.876286 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.876602 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:48 crc 
kubenswrapper[4838]: I1128 09:58:48.876699 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.876829 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.876912 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:48Z","lastTransitionTime":"2025-11-28T09:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.881048 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.900280 4838 
status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:48Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.979404 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.979476 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 
09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.979495 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.979524 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:48 crc kubenswrapper[4838]: I1128 09:58:48.979543 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:48Z","lastTransitionTime":"2025-11-28T09:58:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.081975 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.082031 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.082047 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.082070 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.082086 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:49Z","lastTransitionTime":"2025-11-28T09:58:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.185467 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.185510 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.185523 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.185541 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.185555 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:49Z","lastTransitionTime":"2025-11-28T09:58:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.288946 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.289003 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.289014 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.289032 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.289047 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:49Z","lastTransitionTime":"2025-11-28T09:58:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.393383 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.393443 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.393455 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.393473 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.393491 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:49Z","lastTransitionTime":"2025-11-28T09:58:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.496905 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.496961 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.496973 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.496997 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.497011 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:49Z","lastTransitionTime":"2025-11-28T09:58:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.561360 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.561429 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.561493 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:49 crc kubenswrapper[4838]: E1128 09:58:49.561548 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:49 crc kubenswrapper[4838]: E1128 09:58:49.561682 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:49 crc kubenswrapper[4838]: E1128 09:58:49.561899 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.600396 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.600444 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.600454 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.600471 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.600483 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:49Z","lastTransitionTime":"2025-11-28T09:58:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.704591 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.704639 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.704655 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.704678 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:49 crc kubenswrapper[4838]: I1128 09:58:49.704695 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:49Z","lastTransitionTime":"2025-11-28T09:58:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:50 crc kubenswrapper[4838]: I1128 09:58:50.534017 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:50 crc kubenswrapper[4838]: I1128 09:58:50.534313 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:50 crc kubenswrapper[4838]: I1128 09:58:50.534400 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:50 crc kubenswrapper[4838]: I1128 09:58:50.534505 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:50 crc kubenswrapper[4838]: I1128 09:58:50.534584 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:50Z","lastTransitionTime":"2025-11-28T09:58:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:50 crc kubenswrapper[4838]: I1128 09:58:50.561488 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6"
Nov 28 09:58:50 crc kubenswrapper[4838]: E1128 09:58:50.562259 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447"
Nov 28 09:58:50 crc kubenswrapper[4838]: I1128 09:58:50.562781 4838 scope.go:117] "RemoveContainer" containerID="973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb"
Nov 28 09:58:50 crc kubenswrapper[4838]: E1128 09:58:50.563217 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-gmhsj_openshift-ovn-kubernetes(41b01f7d-5c75-49de-86f7-87e04bf71194)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194"
Nov 28 09:58:50 crc kubenswrapper[4838]: I1128 09:58:50.637706 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:50 crc kubenswrapper[4838]: I1128 09:58:50.637798 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:50 crc kubenswrapper[4838]: I1128 09:58:50.637814 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:50 crc kubenswrapper[4838]: I1128 09:58:50.637837 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:50 crc kubenswrapper[4838]: I1128 09:58:50.637853 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:50Z","lastTransitionTime":"2025-11-28T09:58:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:51 crc kubenswrapper[4838]: I1128 09:58:51.463604 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:51 crc kubenswrapper[4838]: I1128 09:58:51.463656 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:51 crc kubenswrapper[4838]: I1128 09:58:51.463665 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:51 crc kubenswrapper[4838]: I1128 09:58:51.463685 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:51 crc kubenswrapper[4838]: I1128 09:58:51.463694 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:51Z","lastTransitionTime":"2025-11-28T09:58:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:51 crc kubenswrapper[4838]: I1128 09:58:51.561551 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 09:58:51 crc kubenswrapper[4838]: I1128 09:58:51.561626 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 09:58:51 crc kubenswrapper[4838]: I1128 09:58:51.561648 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 09:58:51 crc kubenswrapper[4838]: E1128 09:58:51.561764 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 09:58:51 crc kubenswrapper[4838]: E1128 09:58:51.561829 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 09:58:51 crc kubenswrapper[4838]: E1128 09:58:51.561920 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 09:58:51 crc kubenswrapper[4838]: I1128 09:58:51.571057 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:51 crc kubenswrapper[4838]: I1128 09:58:51.571142 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:51 crc kubenswrapper[4838]: I1128 09:58:51.571160 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:51 crc kubenswrapper[4838]: I1128 09:58:51.571216 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:51 crc kubenswrapper[4838]: I1128 09:58:51.571234 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:51Z","lastTransitionTime":"2025-11-28T09:58:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.292535 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.292603 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.292618 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.292635 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.292672 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:52Z","lastTransitionTime":"2025-11-28T09:58:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
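The five-entry heartbeat above (four "Recording event message for node" entries followed by one setters.go "Node became not ready" entry) repeats on roughly a 100 ms cadence for as long as the CNI configuration is missing, so the useful signal is not any single entry but the window they span. A minimal sketch of how one might tally that window from a log in this format (a hypothetical helper, not part of kubelet or of this capture; the regex assumes only the timestamp layout visible in the entries above):

    import re
    import sys

    # Matches the setters.go entries above, e.g.:
    #   I1128 09:58:49.704695 4838 setters.go:603] "Node became not ready"
    PATTERN = re.compile(
        r'I\d{4} (\d{2}:\d{2}:\d{2}\.\d+) \d+ setters\.go:\d+\] "Node became not ready"'
    )

    def tally(lines):
        # Count the NotReady heartbeats and remember the first/last timestamp seen.
        count, first, last = 0, None, None
        for line in lines:
            m = PATTERN.search(line)
            if m:
                count += 1
                last = m.group(1)
                if first is None:
                    first = last
        return count, first, last

    if __name__ == "__main__":
        count, first, last = tally(sys.stdin)
        print(f"{count} 'Node became not ready' heartbeats between {first} and {last}")

Fed this section of the log on stdin, it would report a steady stream starting at 09:58:49.704695, every one naming the same root cause: no CNI configuration file in /etc/kubernetes/cni/net.d/.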
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.319406 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4rv9b_051f7e1c-2d47-4be9-bbd5-14feec16eb16/kube-multus/0.log"
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.319464 4838 generic.go:334] "Generic (PLEG): container finished" podID="051f7e1c-2d47-4be9-bbd5-14feec16eb16" containerID="f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3" exitCode=1
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.319504 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4rv9b" event={"ID":"051f7e1c-2d47-4be9-bbd5-14feec16eb16","Type":"ContainerDied","Data":"f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3"}
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.320009 4838 scope.go:117] "RemoveContainer" containerID="f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3"
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.338859 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:52Z is after 2025-08-24T17:21:41Z"
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.350074 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:52Z is after 2025-08-24T17:21:41Z"
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.364066 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:52Z is after 2025-08-24T17:21:41Z"
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.378537 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:52Z is after 2025-08-24T17:21:41Z"
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.391603 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:52Z is after 2025-08-24T17:21:41Z"
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.395423 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.395460 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.395474 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.395495 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.395508 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:52Z","lastTransitionTime":"2025-11-28T09:58:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.401861 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:52Z is after 2025-08-24T17:21:41Z"
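Every "Failed to update status for pod" entry above and below fails the same way: the API server cannot call the pod.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 because its serving certificate expired at 2025-08-24T17:21:41Z. A quick check using only the two timestamps quoted in the errors (a sketch for scale, not a tool from this capture) shows how stale that certificate is:

    from datetime import datetime

    # Both timestamps are quoted verbatim in the x509 errors above.
    not_after = datetime.fromisoformat("2025-08-24T17:21:41+00:00")
    now = datetime.fromisoformat("2025-11-28T09:58:52+00:00")
    print(now - not_after)  # -> 95 days, 16:37:11

So the webhook certificate had been expired for roughly 95 days when these entries were written, which is why every pod status patch comes back "Internal error occurred" regardless of which pod is involved.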
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.417946 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:52Z is after 2025-08-24T17:21:41Z"
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.432536 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:52Z is after 2025-08-24T17:21:41Z"
Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.448182 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://658172db64e44d752eb06fe2788dda717db3fc2e672b073a2bdf159a16fd901f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:52Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.458064 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:52Z is after 2025-08-24T17:21:41Z" Nov 28 
09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.467621 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de645de9-a5cd-4075-8bfd-402a619ea73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17e965aad7643d62c651c1e652be45bd914cfe3f14a0a6f43e4e4376b4cb7be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:52Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.479701 4838 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:52Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.491955 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:51Z\\\",\\\"message\\\":\\\"2025-11-28T09:58:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_42560742-ad80-4917-aa9d-aab5fda94562\\\\n2025-11-28T09:58:06+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_42560742-ad80-4917-aa9d-aab5fda94562 to /host/opt/cni/bin/\\\\n2025-11-28T09:58:06Z [verbose] multus-daemon started\\\\n2025-11-28T09:58:06Z [verbose] Readiness Indicator file check\\\\n2025-11-28T09:58:51Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the 
condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:52Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.498014 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.498238 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.498332 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.498427 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.498503 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:52Z","lastTransitionTime":"2025-11-28T09:58:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.515203 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"l\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 09:58:37.612073 6708 services_controller.go:452] Built service openshift-console/downloads per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612094 6708 services_controller.go:452] Built service openshift-machine-api/machine-api-operator-machine-webhook per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612103 6708 services_controller.go:453] Built service openshift-console/downloads template LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612134 6708 services_controller.go:453] Built service openshift-machine-api/machine-api-operator-machine-webhook template LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612147 6708 services_controller.go:454] Service openshift-console/downloads for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nF1128 09:58:37.612098 6708 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node netwo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gmhsj_openshift-ovn-kubernetes(41b01f7d-5c75-49de-86f7-87e04bf71194)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:52Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.528119 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:52Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.542455 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start 
--config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be0
4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:52Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.556113 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file 
\\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for 
RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\"
:\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:52Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.561279 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:52 crc kubenswrapper[4838]: E1128 09:58:52.561392 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.571024 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:52Z is after 
2025-08-24T17:21:41Z" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.601152 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.601190 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.601200 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.601234 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.601244 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:52Z","lastTransitionTime":"2025-11-28T09:58:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.704825 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.704891 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.704915 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.704946 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.704965 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:52Z","lastTransitionTime":"2025-11-28T09:58:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.807621 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.807674 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.807691 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.807742 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.807766 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:52Z","lastTransitionTime":"2025-11-28T09:58:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.909963 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.910014 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.910026 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.910046 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:52 crc kubenswrapper[4838]: I1128 09:58:52.910060 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:52Z","lastTransitionTime":"2025-11-28T09:58:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.013396 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.013441 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.013451 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.013469 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.013481 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:53Z","lastTransitionTime":"2025-11-28T09:58:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.116694 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.116777 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.116790 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.116808 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.116822 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:53Z","lastTransitionTime":"2025-11-28T09:58:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.219926 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.219991 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.220004 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.220026 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.220045 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:53Z","lastTransitionTime":"2025-11-28T09:58:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.323215 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.323283 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.323299 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.323323 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.323338 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:53Z","lastTransitionTime":"2025-11-28T09:58:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.326204 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4rv9b_051f7e1c-2d47-4be9-bbd5-14feec16eb16/kube-multus/0.log" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.326270 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4rv9b" event={"ID":"051f7e1c-2d47-4be9-bbd5-14feec16eb16","Type":"ContainerStarted","Data":"262d73384f8aa0c5e8405e70d091fbc0003217ee2c08a4776048649b9a6eda59"} Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.341083 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\
\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:53Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.354289 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de645de9-a5cd-4075-8bfd-402a619ea73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17e965aad7643d62c651c1e652be45bd914cfe3f14a0a6f43e4e4376b4cb7be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\
\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:53Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.366161 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:53Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.381684 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://262d73384f8aa0c5e8405e70d091fbc0003217ee2c08a4776048649b9a6eda59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:51Z\\\",\\\"message\\\":\\\"2025-11-28T09:58:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_42560742-ad80-4917-aa9d-aab5fda94562\\\\n2025-11-28T09:58:06+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_42560742-ad80-4917-aa9d-aab5fda94562 to /host/opt/cni/bin/\\\\n2025-11-28T09:58:06Z [verbose] multus-daemon started\\\\n2025-11-28T09:58:06Z [verbose] Readiness Indicator file check\\\\n2025-11-28T09:58:51Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:53Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.402465 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"l\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 09:58:37.612073 6708 services_controller.go:452] Built service openshift-console/downloads per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612094 6708 services_controller.go:452] Built service openshift-machine-api/machine-api-operator-machine-webhook per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612103 6708 services_controller.go:453] Built service openshift-console/downloads template LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612134 6708 services_controller.go:453] Built service openshift-machine-api/machine-api-operator-machine-webhook template LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612147 6708 services_controller.go:454] Service openshift-console/downloads for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nF1128 09:58:37.612098 6708 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node netwo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gmhsj_openshift-ovn-kubernetes(41b01f7d-5c75-49de-86f7-87e04bf71194)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:53Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.417424 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:53Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.426142 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.426169 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.426177 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.426191 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.426201 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:53Z","lastTransitionTime":"2025-11-28T09:58:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.432688 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:53Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.447230 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPat
h\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:53Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.458246 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:53Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.470099 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:53Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.480565 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:53Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.491538 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"
},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:53Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.504764 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:53Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.519216 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T09:58:53Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.529375 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.529446 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.529466 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.529490 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.529508 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:53Z","lastTransitionTime":"2025-11-28T09:58:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.535932 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:53Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.554499 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:53Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.561675 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.561704 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.561878 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:53 crc kubenswrapper[4838]: E1128 09:58:53.562040 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:53 crc kubenswrapper[4838]: E1128 09:58:53.562225 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:53 crc kubenswrapper[4838]: E1128 09:58:53.562339 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.570295 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:53Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.592239 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://658172db64e44d752eb06fe2788dda717db3fc2e672b073a2bdf159a16fd901f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"co
ntainerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:53Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.632312 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.632385 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.632395 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.632418 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.632430 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:53Z","lastTransitionTime":"2025-11-28T09:58:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.735758 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.735809 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.735821 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.735840 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.735851 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:53Z","lastTransitionTime":"2025-11-28T09:58:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.838074 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.838171 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.838185 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.838202 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.838215 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:53Z","lastTransitionTime":"2025-11-28T09:58:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.940590 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.940631 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.940642 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.940660 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:53 crc kubenswrapper[4838]: I1128 09:58:53.940674 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:53Z","lastTransitionTime":"2025-11-28T09:58:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.043411 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.043481 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.043503 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.043531 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.043550 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:54Z","lastTransitionTime":"2025-11-28T09:58:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.146707 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.146764 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.146774 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.146789 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.146799 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:54Z","lastTransitionTime":"2025-11-28T09:58:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.250238 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.250308 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.250320 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.250338 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.250349 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:54Z","lastTransitionTime":"2025-11-28T09:58:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.353424 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.353464 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.353476 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.353494 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.353507 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:54Z","lastTransitionTime":"2025-11-28T09:58:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.456089 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.456139 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.456150 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.456169 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.456184 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:54Z","lastTransitionTime":"2025-11-28T09:58:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.558387 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.558427 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.558435 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.558451 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.558459 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:54Z","lastTransitionTime":"2025-11-28T09:58:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.561997 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:54 crc kubenswrapper[4838]: E1128 09:58:54.562129 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.661109 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.661155 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.661166 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.661182 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.661193 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:54Z","lastTransitionTime":"2025-11-28T09:58:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.764929 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.764967 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.764978 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.764994 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.765005 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:54Z","lastTransitionTime":"2025-11-28T09:58:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.868025 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.868453 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.868653 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.868838 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.869005 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:54Z","lastTransitionTime":"2025-11-28T09:58:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.973399 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.973448 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.973461 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.973483 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:54 crc kubenswrapper[4838]: I1128 09:58:54.973496 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:54Z","lastTransitionTime":"2025-11-28T09:58:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.075703 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.075750 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.075767 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.075783 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.075796 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:55Z","lastTransitionTime":"2025-11-28T09:58:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.179153 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.179207 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.179218 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.179235 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.179246 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:55Z","lastTransitionTime":"2025-11-28T09:58:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.281953 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.281995 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.282007 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.282027 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.282038 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:55Z","lastTransitionTime":"2025-11-28T09:58:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.385154 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.385221 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.385238 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.385324 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.385346 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:55Z","lastTransitionTime":"2025-11-28T09:58:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.488334 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.488367 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.488375 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.488389 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.488398 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:55Z","lastTransitionTime":"2025-11-28T09:58:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.562682 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.562767 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:55 crc kubenswrapper[4838]: E1128 09:58:55.562847 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.562996 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:55 crc kubenswrapper[4838]: E1128 09:58:55.563039 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:55 crc kubenswrapper[4838]: E1128 09:58:55.563139 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.591225 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.591257 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.591267 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.591281 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.591291 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:55Z","lastTransitionTime":"2025-11-28T09:58:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.694228 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.694269 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.694279 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.694294 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.694305 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:55Z","lastTransitionTime":"2025-11-28T09:58:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.796600 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.796675 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.796695 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.796774 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.796801 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:55Z","lastTransitionTime":"2025-11-28T09:58:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.899974 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.900019 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.900030 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.900048 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:55 crc kubenswrapper[4838]: I1128 09:58:55.900061 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:55Z","lastTransitionTime":"2025-11-28T09:58:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.005069 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.005114 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.005123 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.005140 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.005153 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:56Z","lastTransitionTime":"2025-11-28T09:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.108287 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.108364 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.108382 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.108408 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.108425 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:56Z","lastTransitionTime":"2025-11-28T09:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.211347 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.211391 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.211400 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.211414 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.211425 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:56Z","lastTransitionTime":"2025-11-28T09:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.314644 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.314693 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.314704 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.314813 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.314829 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:56Z","lastTransitionTime":"2025-11-28T09:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.417858 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.417921 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.417932 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.417949 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.417961 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:56Z","lastTransitionTime":"2025-11-28T09:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.520108 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.520167 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.520190 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.520219 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.520239 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:56Z","lastTransitionTime":"2025-11-28T09:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.561106 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:56 crc kubenswrapper[4838]: E1128 09:58:56.561239 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.623702 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.623779 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.623792 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.623810 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.623821 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:56Z","lastTransitionTime":"2025-11-28T09:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.726887 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.726927 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.726935 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.726950 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.726963 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:56Z","lastTransitionTime":"2025-11-28T09:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.833944 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.833986 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.833998 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.834016 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.834027 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:56Z","lastTransitionTime":"2025-11-28T09:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.936056 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.936093 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.936101 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.936114 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:56 crc kubenswrapper[4838]: I1128 09:58:56.936124 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:56Z","lastTransitionTime":"2025-11-28T09:58:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.038486 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.038884 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.038922 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.038945 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.038961 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:57Z","lastTransitionTime":"2025-11-28T09:58:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.141020 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.141062 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.141070 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.141086 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.141095 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:57Z","lastTransitionTime":"2025-11-28T09:58:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.243539 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.243582 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.243593 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.243610 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.243622 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:57Z","lastTransitionTime":"2025-11-28T09:58:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.346693 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.346758 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.346772 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.346789 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.346801 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:57Z","lastTransitionTime":"2025-11-28T09:58:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.450278 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.450320 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.450329 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.450343 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.450353 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:57Z","lastTransitionTime":"2025-11-28T09:58:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.464933 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.465032 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.465056 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.465079 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:57 crc kubenswrapper[4838]: E1128 09:58:57.465179 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:01.465163547 +0000 UTC m=+173.164137717 (durationBeforeRetry 1m4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 09:58:57 crc kubenswrapper[4838]: E1128 09:58:57.465228 4838 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 09:58:57 crc kubenswrapper[4838]: E1128 09:58:57.465225 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 09:58:57 crc kubenswrapper[4838]: E1128 09:58:57.465261 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 09:58:57 crc kubenswrapper[4838]: E1128 09:58:57.465267 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 10:00:01.465258299 +0000 UTC m=+173.164232469 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 09:58:57 crc kubenswrapper[4838]: E1128 09:58:57.465274 4838 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:58:57 crc kubenswrapper[4838]: E1128 09:58:57.465314 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 09:58:57 crc kubenswrapper[4838]: E1128 09:58:57.465351 4838 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 09:58:57 crc kubenswrapper[4838]: E1128 09:58:57.465373 4838 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:58:57 crc kubenswrapper[4838]: E1128 09:58:57.465333 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 10:00:01.46531223 +0000 UTC m=+173.164286500 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:58:57 crc kubenswrapper[4838]: E1128 09:58:57.465438 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 10:00:01.465426553 +0000 UTC m=+173.164400853 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.465557 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:57 crc kubenswrapper[4838]: E1128 09:58:57.465636 4838 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 09:58:57 crc kubenswrapper[4838]: E1128 09:58:57.465670 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 10:00:01.465663759 +0000 UTC m=+173.164637929 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.551930 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.551956 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.551964 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.551975 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.551983 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:57Z","lastTransitionTime":"2025-11-28T09:58:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.582580 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.582604 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.582640 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:57 crc kubenswrapper[4838]: E1128 09:58:57.582739 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:57 crc kubenswrapper[4838]: E1128 09:58:57.582844 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:57 crc kubenswrapper[4838]: E1128 09:58:57.582883 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.655632 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.655670 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.655681 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.655695 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.655706 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:57Z","lastTransitionTime":"2025-11-28T09:58:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.759108 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.759170 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.759187 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.759213 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.759231 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:57Z","lastTransitionTime":"2025-11-28T09:58:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.862259 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.862340 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.862365 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.862394 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.862413 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:57Z","lastTransitionTime":"2025-11-28T09:58:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.965842 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.966295 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.966464 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.966612 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:57 crc kubenswrapper[4838]: I1128 09:58:57.966783 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:57Z","lastTransitionTime":"2025-11-28T09:58:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.070174 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.070259 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.070279 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.070306 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.070325 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:58Z","lastTransitionTime":"2025-11-28T09:58:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.173307 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.173369 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.173385 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.173412 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.173432 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:58Z","lastTransitionTime":"2025-11-28T09:58:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.277090 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.277165 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.277188 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.277214 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.277230 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:58Z","lastTransitionTime":"2025-11-28T09:58:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.380587 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.380624 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.380632 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.380646 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.380655 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:58Z","lastTransitionTime":"2025-11-28T09:58:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.483942 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.483995 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.484010 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.484034 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.484051 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:58Z","lastTransitionTime":"2025-11-28T09:58:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.561564 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:58:58 crc kubenswrapper[4838]: E1128 09:58:58.561774 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.584596 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:58Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.587052 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.587102 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.587126 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.587157 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.587179 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:58Z","lastTransitionTime":"2025-11-28T09:58:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
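Every "Failed to update status for pod" entry in this stretch fails identically: the pod.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 serves a certificate that expired on 2025-08-24T17:21:41Z, long before the node's current clock of 2025-11-28. A hedged diagnostic sketch follows (endpoint taken from the log; not part of any cluster tooling) that dials the webhook and prints the validity window the TLS handshake is comparing against.

```go
// Dial the webhook endpoint from the log and print its serving certificate's
// validity window. InsecureSkipVerify lets the handshake complete so the
// expired certificate can be inspected instead of rejected outright.
package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	now := time.Now().UTC()
	fmt.Println("NotBefore:", cert.NotBefore)
	fmt.Println("NotAfter: ", cert.NotAfter)
	fmt.Println("Now:      ", now)
	if now.After(cert.NotAfter) {
		// Same comparison the kubelet reports: "current time ... is after ..."
		fmt.Println("x509: certificate has expired")
	}
}
```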
Has your network provider started?"} Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.601237 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:58Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.618710 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:58Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.638535 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:58Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.655655 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T09:58:58Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.671898 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:58Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.689240 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:58Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.689944 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.689982 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.690018 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.690037 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.690051 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:58Z","lastTransitionTime":"2025-11-28T09:58:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.703701 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:58Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.717118 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://658172db64e44d752eb06fe2788dda717db3fc2e672b073a2bdf159a16fd901f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:58Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.730938 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:58Z is after 2025-08-24T17:21:41Z" Nov 28 
09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.756733 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de645de9-a5cd-4075-8bfd-402a619ea73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17e965aad7643d62c651c1e652be45bd914cfe3f14a0a6f43e4e4376b4cb7be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:58Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.770678 4838 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:58Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.789369 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://262d73384f8aa0c5e8405e70d091fbc0003217ee2c08a4776048649b9a6eda59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:51Z\\\",\\\"message\\\":\\\"2025-11-28T09:58:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_42560742-ad80-4917-aa9d-aab5fda94562\\\\n2025-11-28T09:58:06+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_42560742-ad80-4917-aa9d-aab5fda94562 to /host/opt/cni/bin/\\\\n2025-11-28T09:58:06Z [verbose] multus-daemon started\\\\n2025-11-28T09:58:06Z [verbose] Readiness Indicator file check\\\\n2025-11-28T09:58:51Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:58Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.792552 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.792903 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.793031 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.793116 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.793201 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:58Z","lastTransitionTime":"2025-11-28T09:58:58Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.812806 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/s
ecrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"l\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 09:58:37.612073 6708 services_controller.go:452] Built service openshift-console/downloads per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612094 6708 services_controller.go:452] Built service openshift-machine-api/machine-api-operator-machine-webhook per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612103 6708 services_controller.go:453] Built service openshift-console/downloads template LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612134 6708 services_controller.go:453] Built service openshift-machine-api/machine-api-operator-machine-webhook template LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612147 6708 services_controller.go:454] Service openshift-console/downloads for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nF1128 09:58:37.612098 6708 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node 
netwo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-gmhsj_openshift-ovn-kubernetes(41b01f7d-5c75-49de-86f7-87e04bf71194)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursi
veReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:58Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.828945 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:58Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.849794 4838 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:58Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.872707 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPat
h\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:58Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.890791 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:58Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.907089 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.907136 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.907148 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.907167 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:58 crc kubenswrapper[4838]: I1128 09:58:58.907179 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:58Z","lastTransitionTime":"2025-11-28T09:58:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.009198 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.009447 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.009581 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.009660 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.009746 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:59Z","lastTransitionTime":"2025-11-28T09:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.032179 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.032452 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.032639 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.032837 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.033009 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:59Z","lastTransitionTime":"2025-11-28T09:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:59 crc kubenswrapper[4838]: E1128 09:58:59.047203 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:59Z is after 2025-08-24T17:21:41Z"
Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.050173 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.050286 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
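The rejected node patch above reports capacity (cpu 12, memory 32865356Ki) alongside allocatable (cpu 11800m, memory 32404556Ki); the gap is what the node withholds for system and kube reservations. A small sketch checking that arithmetic with the resource.Quantity type (assuming the standard k8s.io/apimachinery module; values copied from the patch):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Capacity and allocatable values copied from the node status patch above.
	cpuCap := resource.MustParse("12")
	cpuAlloc := resource.MustParse("11800m")
	memCap := resource.MustParse("32865356Ki")
	memAlloc := resource.MustParse("32404556Ki")

	cpuCap.Sub(cpuAlloc) // Sub mutates the receiver in place.
	memCap.Sub(memAlloc)

	fmt.Println("cpu reserved:", cpuCap.String()) // 200m
	fmt.Println("mem reserved:", memCap.String()) // 460800Ki
}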
event="NodeHasNoDiskPressure" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.050378 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.050540 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.050605 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:59Z","lastTransitionTime":"2025-11-28T09:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:59 crc kubenswrapper[4838]: E1128 09:58:59.063069 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:59Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.066116 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.066244 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.066314 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.066405 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.066480 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:59Z","lastTransitionTime":"2025-11-28T09:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:59 crc kubenswrapper[4838]: E1128 09:58:59.078697 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:59Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.081605 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.081633 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.081668 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.081683 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.081694 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:59Z","lastTransitionTime":"2025-11-28T09:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:59 crc kubenswrapper[4838]: E1128 09:58:59.096397 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:59Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.100001 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.100036 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.100048 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.100065 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.100077 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:59Z","lastTransitionTime":"2025-11-28T09:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:59 crc kubenswrapper[4838]: E1128 09:58:59.111283 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:58:59Z is after 2025-08-24T17:21:41Z" Nov 28 09:58:59 crc kubenswrapper[4838]: E1128 09:58:59.111390 4838 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.112914 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.112943 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.112954 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.112970 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.112982 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:59Z","lastTransitionTime":"2025-11-28T09:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.215286 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.215357 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.215377 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.215404 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.215426 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:59Z","lastTransitionTime":"2025-11-28T09:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.332588 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.332632 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.332649 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.332670 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.332687 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:59Z","lastTransitionTime":"2025-11-28T09:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.435370 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.435406 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.435413 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.435427 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.435456 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:59Z","lastTransitionTime":"2025-11-28T09:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.538847 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.538918 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.538936 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.538962 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.538980 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:59Z","lastTransitionTime":"2025-11-28T09:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.561387 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:58:59 crc kubenswrapper[4838]: E1128 09:58:59.561500 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.561551 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.561572 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:58:59 crc kubenswrapper[4838]: E1128 09:58:59.561748 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:58:59 crc kubenswrapper[4838]: E1128 09:58:59.561830 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.642068 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.642161 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.642177 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.642242 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.642260 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:59Z","lastTransitionTime":"2025-11-28T09:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.745767 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.745801 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.745810 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.745825 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.745836 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:59Z","lastTransitionTime":"2025-11-28T09:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.848853 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.848887 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.848895 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.848908 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.848921 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:59Z","lastTransitionTime":"2025-11-28T09:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.951465 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.951504 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.951512 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.951527 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:58:59 crc kubenswrapper[4838]: I1128 09:58:59.951535 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:58:59Z","lastTransitionTime":"2025-11-28T09:58:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.054809 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.054838 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.054847 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.054859 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.054873 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:00Z","lastTransitionTime":"2025-11-28T09:59:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.157561 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.157625 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.157642 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.157666 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.157683 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:00Z","lastTransitionTime":"2025-11-28T09:59:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.260547 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.260623 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.260652 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.260685 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.260709 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:00Z","lastTransitionTime":"2025-11-28T09:59:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.363071 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.363121 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.363136 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.363158 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.363173 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:00Z","lastTransitionTime":"2025-11-28T09:59:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.465780 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.465818 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.465827 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.465841 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.465852 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:00Z","lastTransitionTime":"2025-11-28T09:59:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.561645 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:59:00 crc kubenswrapper[4838]: E1128 09:59:00.561911 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.567780 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.567838 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.567851 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.567870 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.567882 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:00Z","lastTransitionTime":"2025-11-28T09:59:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.671015 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.671084 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.671100 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.671122 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.671137 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:00Z","lastTransitionTime":"2025-11-28T09:59:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.775343 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.775409 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.775420 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.775441 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.775455 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:00Z","lastTransitionTime":"2025-11-28T09:59:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.879547 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.879614 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.879625 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.879654 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.879667 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:00Z","lastTransitionTime":"2025-11-28T09:59:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.982696 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.982790 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.982811 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.982842 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:00 crc kubenswrapper[4838]: I1128 09:59:00.982862 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:00Z","lastTransitionTime":"2025-11-28T09:59:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.085884 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.085936 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.085948 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.085969 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.085981 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:01Z","lastTransitionTime":"2025-11-28T09:59:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.189376 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.189460 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.189473 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.189495 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.189506 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:01Z","lastTransitionTime":"2025-11-28T09:59:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.293255 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.293312 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.293322 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.293344 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.293357 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:01Z","lastTransitionTime":"2025-11-28T09:59:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.397410 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.397491 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.397508 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.397543 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.397563 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:01Z","lastTransitionTime":"2025-11-28T09:59:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.501530 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.501595 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.501609 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.501645 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.501662 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:01Z","lastTransitionTime":"2025-11-28T09:59:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.561790 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.561831 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:59:01 crc kubenswrapper[4838]: E1128 09:59:01.561928 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.561986 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:59:01 crc kubenswrapper[4838]: E1128 09:59:01.562057 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:59:01 crc kubenswrapper[4838]: E1128 09:59:01.562204 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.604915 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.605007 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.605026 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.605049 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.605066 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:01Z","lastTransitionTime":"2025-11-28T09:59:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.707703 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.707799 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.707817 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.707838 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.707853 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:01Z","lastTransitionTime":"2025-11-28T09:59:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.810920 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.811003 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.811027 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.811059 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.811081 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:01Z","lastTransitionTime":"2025-11-28T09:59:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.913190 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.913229 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.913239 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.913250 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:01 crc kubenswrapper[4838]: I1128 09:59:01.913259 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:01Z","lastTransitionTime":"2025-11-28T09:59:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.016277 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.016348 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.016370 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.016401 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.016426 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:02Z","lastTransitionTime":"2025-11-28T09:59:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.120059 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.120144 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.120169 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.120203 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.120228 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:02Z","lastTransitionTime":"2025-11-28T09:59:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.223557 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.223633 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.223655 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.223680 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.223698 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:02Z","lastTransitionTime":"2025-11-28T09:59:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.327104 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.327190 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.327209 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.327236 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.327285 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:02Z","lastTransitionTime":"2025-11-28T09:59:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.430857 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.430976 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.430995 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.431020 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.431038 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:02Z","lastTransitionTime":"2025-11-28T09:59:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.534438 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.534507 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.534526 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.534552 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.534568 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:02Z","lastTransitionTime":"2025-11-28T09:59:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.561420 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:59:02 crc kubenswrapper[4838]: E1128 09:59:02.561694 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.563119 4838 scope.go:117] "RemoveContainer" containerID="973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.638190 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.638248 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.638269 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.638297 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.638313 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:02Z","lastTransitionTime":"2025-11-28T09:59:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.740664 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.740753 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.740768 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.740847 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.740865 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:02Z","lastTransitionTime":"2025-11-28T09:59:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.844493 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.844557 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.844574 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.844600 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.844620 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:02Z","lastTransitionTime":"2025-11-28T09:59:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.947898 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.947961 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.947983 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.948013 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:02 crc kubenswrapper[4838]: I1128 09:59:02.948040 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:02Z","lastTransitionTime":"2025-11-28T09:59:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.050516 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.050552 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.050566 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.050589 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.050605 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:03Z","lastTransitionTime":"2025-11-28T09:59:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.175273 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.175323 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.175334 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.175350 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.175362 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:03Z","lastTransitionTime":"2025-11-28T09:59:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.278557 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.278878 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.278946 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.279019 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.279091 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:03Z","lastTransitionTime":"2025-11-28T09:59:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.387308 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.387355 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.387373 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.387394 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.387410 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:03Z","lastTransitionTime":"2025-11-28T09:59:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.489799 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.489829 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.489837 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.489850 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.489859 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:03Z","lastTransitionTime":"2025-11-28T09:59:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.561763 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.561773 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.561902 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:59:03 crc kubenswrapper[4838]: E1128 09:59:03.562643 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:59:03 crc kubenswrapper[4838]: E1128 09:59:03.562406 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:59:03 crc kubenswrapper[4838]: E1128 09:59:03.562805 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.592813 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.592862 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.592875 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.592895 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.592908 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:03Z","lastTransitionTime":"2025-11-28T09:59:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.695484 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.695560 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.695611 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.695646 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.695665 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:03Z","lastTransitionTime":"2025-11-28T09:59:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.798850 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.798904 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.798916 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.798934 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.798948 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:03Z","lastTransitionTime":"2025-11-28T09:59:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.901677 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.901772 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.901796 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.901826 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:03 crc kubenswrapper[4838]: I1128 09:59:03.901849 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:03Z","lastTransitionTime":"2025-11-28T09:59:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.003644 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.003681 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.003690 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.003703 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.003747 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:04Z","lastTransitionTime":"2025-11-28T09:59:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.107885 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.107938 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.107951 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.107968 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.107982 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:04Z","lastTransitionTime":"2025-11-28T09:59:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.211771 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.211804 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.211815 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.211834 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.211846 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:04Z","lastTransitionTime":"2025-11-28T09:59:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.314950 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.315008 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.315024 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.315048 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.315065 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:04Z","lastTransitionTime":"2025-11-28T09:59:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.366212 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gmhsj_41b01f7d-5c75-49de-86f7-87e04bf71194/ovnkube-controller/2.log" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.369018 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerStarted","Data":"71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679"} Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.417464 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.417495 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.417503 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.417516 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.417524 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:04Z","lastTransitionTime":"2025-11-28T09:59:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.520891 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.520963 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.520976 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.520999 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.521012 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:04Z","lastTransitionTime":"2025-11-28T09:59:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.562017 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:59:04 crc kubenswrapper[4838]: E1128 09:59:04.562181 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.623529 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.623593 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.623610 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.623641 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.623660 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:04Z","lastTransitionTime":"2025-11-28T09:59:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.726371 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.726427 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.726440 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.726459 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.726471 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:04Z","lastTransitionTime":"2025-11-28T09:59:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.838981 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.839182 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.839193 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.839210 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.839221 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:04Z","lastTransitionTime":"2025-11-28T09:59:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.943781 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.943850 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.943867 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.943891 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:04 crc kubenswrapper[4838]: I1128 09:59:04.943912 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:04Z","lastTransitionTime":"2025-11-28T09:59:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.047656 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.047737 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.047756 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.047778 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.047795 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:05Z","lastTransitionTime":"2025-11-28T09:59:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.151295 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.151355 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.151372 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.151399 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.151418 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:05Z","lastTransitionTime":"2025-11-28T09:59:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.254976 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.255088 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.255107 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.255131 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.255150 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:05Z","lastTransitionTime":"2025-11-28T09:59:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.358202 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.358269 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.358285 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.358309 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.358326 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:05Z","lastTransitionTime":"2025-11-28T09:59:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.373636 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.397295 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:05Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.417611 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:05Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.437427 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://658172db64e44d752eb06fe2788dda717db3fc2e672b073a2bdf159a16fd901f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:05Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.451752 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:05Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.461850 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.461907 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.461919 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.461941 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.461957 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:05Z","lastTransitionTime":"2025-11-28T09:59:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.467019 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:05Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.477625 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de645de9-a5cd-4075-8bfd-402a619ea73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17e965aad7643d62c651c1e652be45bd914cfe3f14a0a6f43e4e4376b4cb7be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:05Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.490805 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:05Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.505975 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://262d73384f8aa0c5e8405e70d091fbc0003217ee2c08a4776048649b9a6eda59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:51Z\\\",\\\"message\\\":\\\"2025-11-28T09:58:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_42560742-ad80-4917-aa9d-aab5fda94562\\\\n2025-11-28T09:58:06+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_42560742-ad80-4917-aa9d-aab5fda94562 to /host/opt/cni/bin/\\\\n2025-11-28T09:58:06Z [verbose] multus-daemon started\\\\n2025-11-28T09:58:06Z [verbose] Readiness Indicator file check\\\\n2025-11-28T09:58:51Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:05Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.517183 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:05Z is after 2025-08-24T17:21:41Z" Nov 28 
09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.533295 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:05Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.552509 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPat
h\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:05Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.561386 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.561505 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:59:05 crc kubenswrapper[4838]: E1128 09:59:05.561579 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:59:05 crc kubenswrapper[4838]: E1128 09:59:05.561749 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.561891 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:59:05 crc kubenswrapper[4838]: E1128 09:59:05.561981 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.565359 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.565395 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.565406 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.565424 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.565435 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:05Z","lastTransitionTime":"2025-11-28T09:59:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.570439 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:05Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.592235 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"l\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 09:58:37.612073 6708 services_controller.go:452] Built service openshift-console/downloads per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612094 6708 services_controller.go:452] Built service openshift-machine-api/machine-api-operator-machine-webhook per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612103 6708 services_controller.go:453] Built service openshift-console/downloads template LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612134 6708 services_controller.go:453] Built service openshift-machine-api/machine-api-operator-machine-webhook template LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612147 6708 services_controller.go:454] Service openshift-console/downloads for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nF1128 09:58:37.612098 6708 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node 
netwo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:59:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"
containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:05Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.604791 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:05Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.620128 4838 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:05Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.637804 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:05Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.656262 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:05Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.667797 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.667870 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.667879 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.667896 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.667908 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:05Z","lastTransitionTime":"2025-11-28T09:59:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.668493 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:05Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.770109 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.770144 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.770152 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.770166 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.770174 4838 setters.go:603] "Node became not 
ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:05Z","lastTransitionTime":"2025-11-28T09:59:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.872570 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.872605 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.872613 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.872626 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.872634 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:05Z","lastTransitionTime":"2025-11-28T09:59:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.975391 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.975439 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.975452 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.975471 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:05 crc kubenswrapper[4838]: I1128 09:59:05.975485 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:05Z","lastTransitionTime":"2025-11-28T09:59:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.078458 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.078532 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.078555 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.078586 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.078610 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:06Z","lastTransitionTime":"2025-11-28T09:59:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.181017 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.181059 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.181076 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.181097 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.181112 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:06Z","lastTransitionTime":"2025-11-28T09:59:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.284120 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.284212 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.284230 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.284253 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.284272 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:06Z","lastTransitionTime":"2025-11-28T09:59:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.387363 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.387449 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.387471 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.387495 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.387513 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:06Z","lastTransitionTime":"2025-11-28T09:59:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.490266 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.490320 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.490337 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.490360 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.490377 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:06Z","lastTransitionTime":"2025-11-28T09:59:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.561111 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:59:06 crc kubenswrapper[4838]: E1128 09:59:06.561341 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.592879 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.592939 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.592953 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.592972 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.592986 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:06Z","lastTransitionTime":"2025-11-28T09:59:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.696012 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.696061 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.696075 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.696094 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.696124 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:06Z","lastTransitionTime":"2025-11-28T09:59:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.799186 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.799268 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.799287 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.799338 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.799367 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:06Z","lastTransitionTime":"2025-11-28T09:59:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.902392 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.902453 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.902471 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.902496 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:06 crc kubenswrapper[4838]: I1128 09:59:06.902517 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:06Z","lastTransitionTime":"2025-11-28T09:59:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.005370 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.005428 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.005439 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.005462 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.005474 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:07Z","lastTransitionTime":"2025-11-28T09:59:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.108335 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.108401 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.108413 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.108450 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.108466 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:07Z","lastTransitionTime":"2025-11-28T09:59:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.210938 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.211005 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.211022 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.211046 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.211063 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:07Z","lastTransitionTime":"2025-11-28T09:59:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.314256 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.314315 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.314334 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.314359 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.314377 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:07Z","lastTransitionTime":"2025-11-28T09:59:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.381946 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gmhsj_41b01f7d-5c75-49de-86f7-87e04bf71194/ovnkube-controller/3.log" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.383149 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gmhsj_41b01f7d-5c75-49de-86f7-87e04bf71194/ovnkube-controller/2.log" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.386406 4838 generic.go:334] "Generic (PLEG): container finished" podID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerID="71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679" exitCode=1 Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.386461 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerDied","Data":"71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679"} Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.386544 4838 scope.go:117] "RemoveContainer" containerID="973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.387938 4838 scope.go:117] "RemoveContainer" containerID="71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679" Nov 28 09:59:07 crc kubenswrapper[4838]: E1128 09:59:07.388318 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-gmhsj_openshift-ovn-kubernetes(41b01f7d-5c75-49de-86f7-87e04bf71194)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.405410 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.417409 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.417459 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.417469 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.417486 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.417498 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:07Z","lastTransitionTime":"2025-11-28T09:59:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.426063 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.444471 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.457942 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126
.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.478559 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.498242 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.521050 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.521115 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.521142 4838 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.521171 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.521192 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:07Z","lastTransitionTime":"2025-11-28T09:59:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.525115 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://658172db64e44d752eb06fe2788dda717db3fc2e672b073a2bdf159a16fd901f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d
0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Runnin
g\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.540036 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.555591 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.561199 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.561275 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:59:07 crc kubenswrapper[4838]: E1128 09:59:07.561325 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:59:07 crc kubenswrapper[4838]: E1128 09:59:07.561457 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.561698 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:59:07 crc kubenswrapper[4838]: E1128 09:59:07.561858 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.574838 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de645de9-a5cd-4075-8bfd-402a619ea73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17e965aad7643d62c651c1e652be45bd914cfe3f14a0a6f43e4e4376b4cb7be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.593474 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.607241 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://262d73384f8aa0c5e8405e70d091fbc0003217ee2c08a4776048649b9a6eda59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:51Z\\\",\\\"message\\\":\\\"2025-11-28T09:58:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_42560742-ad80-4917-aa9d-aab5fda94562\\\\n2025-11-28T09:58:06+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_42560742-ad80-4917-aa9d-aab5fda94562 to /host/opt/cni/bin/\\\\n2025-11-28T09:58:06Z [verbose] multus-daemon started\\\\n2025-11-28T09:58:06Z [verbose] Readiness Indicator file check\\\\n2025-11-28T09:58:51Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.620856 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:07Z is after 2025-08-24T17:21:41Z" Nov 28 
09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.623152 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.623184 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.623193 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.623206 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.623216 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:07Z","lastTransitionTime":"2025-11-28T09:59:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.636074 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. 
The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ku
be-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.651565 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e63
55e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.663169 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.681054 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71617073a4d76318049cb634cb4ee2135cd2c25b
c3d3d6285eb69baf1fae9679\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"l\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 09:58:37.612073 6708 services_controller.go:452] Built service openshift-console/downloads per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612094 6708 services_controller.go:452] Built service openshift-machine-api/machine-api-operator-machine-webhook per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612103 6708 services_controller.go:453] Built service openshift-console/downloads template LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612134 6708 services_controller.go:453] Built service openshift-machine-api/machine-api-operator-machine-webhook template LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612147 6708 services_controller.go:454] Service openshift-console/downloads for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nF1128 09:58:37.612098 6708 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node netwo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:59:06Z\\\",\\\"message\\\":\\\"l\\\\nI1128 09:59:06.154371 7040 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 09:59:06.154405 7040 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 09:59:06.154445 7040 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1128 09:59:06.154457 7040 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1128 09:59:06.154488 7040 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 09:59:06.154507 7040 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 09:59:06.154517 7040 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 09:59:06.154549 7040 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 09:59:06.154572 7040 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 09:59:06.154581 7040 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 09:59:06.154603 7040 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 09:59:06.154624 7040 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 09:59:06.154633 7040 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 09:59:06.154634 7040 factory.go:656] 
Stopping watch factory\\\\nI1128 09:59:06.154640 7040 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 09:59:06.154652 7040 ovnkube.go:599] Stopped ovnkube\\\\nI1128 0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:59:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\
\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.693466 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:07Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.726452 4838 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.726495 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.726506 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.726524 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.726535 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:07Z","lastTransitionTime":"2025-11-28T09:59:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.830448 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.830498 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.830517 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.830543 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.830561 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:07Z","lastTransitionTime":"2025-11-28T09:59:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.934025 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.934092 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.934109 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.934134 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:07 crc kubenswrapper[4838]: I1128 09:59:07.934152 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:07Z","lastTransitionTime":"2025-11-28T09:59:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.037898 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.037966 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.037984 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.038011 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.038028 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:08Z","lastTransitionTime":"2025-11-28T09:59:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.141326 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.141375 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.141385 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.141402 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.141413 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:08Z","lastTransitionTime":"2025-11-28T09:59:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.244158 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.244294 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.244317 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.244346 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.244367 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:08Z","lastTransitionTime":"2025-11-28T09:59:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.347128 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.347211 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.347228 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.347253 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.347270 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:08Z","lastTransitionTime":"2025-11-28T09:59:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.393279 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gmhsj_41b01f7d-5c75-49de-86f7-87e04bf71194/ovnkube-controller/3.log" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.450239 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.450314 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.450333 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.450358 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.450376 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:08Z","lastTransitionTime":"2025-11-28T09:59:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:08 crc kubenswrapper[4838]: E1128 09:59:08.551521 4838 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.562091 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:59:08 crc kubenswrapper[4838]: E1128 09:59:08.562260 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.583511 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.604973 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.628958 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://658172db64e44d752eb06fe2788dda717db3fc2e672b073a2bdf159a16fd901f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.647083 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:08 crc kubenswrapper[4838]: E1128 09:59:08.661885 4838 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.661948 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.675879 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de645de9-a5cd-4075-8bfd-402a619ea73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17e965aad7643d62c651c1e652be45bd914cfe3f14a0a6f43e4e4376b4cb7be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.692592 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.708411 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://262d73384f8aa0c5e8405e70d091fbc0003217ee2c08a4776048649b9a6eda59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:51Z\\\",\\\"message\\\":\\\"2025-11-28T09:58:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_42560742-ad80-4917-aa9d-aab5fda94562\\\\n2025-11-28T09:58:06+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_42560742-ad80-4917-aa9d-aab5fda94562 to /host/opt/cni/bin/\\\\n2025-11-28T09:58:06Z [verbose] multus-daemon started\\\\n2025-11-28T09:58:06Z [verbose] Readiness Indicator file check\\\\n2025-11-28T09:58:51Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.726243 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:08Z is after 2025-08-24T17:21:41Z" Nov 28 
09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.741549 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:08Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.760591 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPat
h\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:08Z is after 2025-08-24T17:21:41Z"
Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.773587 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:08Z is after 2025-08-24T17:21:41Z"
Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.795498 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status:
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71617073a4d76318049cb634cb4ee2135cd2c25b
c3d3d6285eb69baf1fae9679\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"l\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 09:58:37.612073 6708 services_controller.go:452] Built service openshift-console/downloads per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612094 6708 services_controller.go:452] Built service openshift-machine-api/machine-api-operator-machine-webhook per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612103 6708 services_controller.go:453] Built service openshift-console/downloads template LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612134 6708 services_controller.go:453] Built service openshift-machine-api/machine-api-operator-machine-webhook template LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612147 6708 services_controller.go:454] Service openshift-console/downloads for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nF1128 09:58:37.612098 6708 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node netwo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:59:06Z\\\",\\\"message\\\":\\\"l\\\\nI1128 09:59:06.154371 7040 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 09:59:06.154405 7040 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 09:59:06.154445 7040 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1128 09:59:06.154457 7040 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1128 09:59:06.154488 7040 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 09:59:06.154507 7040 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 09:59:06.154517 7040 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 09:59:06.154549 7040 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 09:59:06.154572 7040 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 09:59:06.154581 7040 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 09:59:06.154603 7040 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 09:59:06.154624 7040 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 09:59:06.154633 7040 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 09:59:06.154634 7040 factory.go:656] 
Stopping watch factory\\\\nI1128 09:59:06.154640 7040 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 09:59:06.154652 7040 ovnkube.go:599] Stopped ovnkube\\\\nI1128 0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:59:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\
\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:08Z is after 2025-08-24T17:21:41Z"
Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.809748 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:08Z is after 2025-08-24T17:21:41Z"
Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.823356 4838 status_manager.go:875] "Failed to
update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:08Z is after 2025-08-24T17:21:41Z"
Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.838280 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:08Z is after 2025-08-24T17:21:41Z"
Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.852677 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:08Z is after 2025-08-24T17:21:41Z"
Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.864011 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:08Z is after 2025-08-24T17:21:41Z"
Nov 28 09:59:08 crc kubenswrapper[4838]: I1128 09:59:08.991244 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs\") pod \"network-metrics-daemon-p69l6\" (UID: \"2a223cc8-af33-4e83-8bfc-2676c5700447\") " pod="openshift-multus/network-metrics-daemon-p69l6"
Nov 28 09:59:08 crc kubenswrapper[4838]: E1128 09:59:08.991510 4838 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 28 09:59:08 crc kubenswrapper[4838]: E1128 09:59:08.991622 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs podName:2a223cc8-af33-4e83-8bfc-2676c5700447 nodeName:}" failed. No retries permitted until 2025-11-28 10:00:12.99159696 +0000 UTC m=+184.690571160 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs") pod "network-metrics-daemon-p69l6" (UID: "2a223cc8-af33-4e83-8bfc-2676c5700447") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.341266 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.341337 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.341359 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.341385 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.341402 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:09Z","lastTransitionTime":"2025-11-28T09:59:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 09:59:09 crc kubenswrapper[4838]: E1128 09:59:09.362789 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:09Z is after 2025-08-24T17:21:41Z"
Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.367475 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.367532 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasNoDiskPressure"
Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.367549 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.367572 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.367591 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:09Z","lastTransitionTime":"2025-11-28T09:59:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:59:09 crc kubenswrapper[4838]: E1128 09:59:09.384122 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:09Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.388499 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.388546 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.388559 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.388575 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.388588 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:09Z","lastTransitionTime":"2025-11-28T09:59:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:09 crc kubenswrapper[4838]: E1128 09:59:09.403076 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:09Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.408457 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.408496 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.408511 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.408532 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.408547 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:09Z","lastTransitionTime":"2025-11-28T09:59:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:09 crc kubenswrapper[4838]: E1128 09:59:09.422562 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:09Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.427596 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.427696 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.427747 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.427773 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.427789 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:09Z","lastTransitionTime":"2025-11-28T09:59:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:09 crc kubenswrapper[4838]: E1128 09:59:09.443220 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:09Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:09 crc kubenswrapper[4838]: E1128 09:59:09.443382 4838 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.561137 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.561207 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:59:09 crc kubenswrapper[4838]: I1128 09:59:09.561167 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:59:09 crc kubenswrapper[4838]: E1128 09:59:09.561371 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:59:09 crc kubenswrapper[4838]: E1128 09:59:09.561558 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:59:09 crc kubenswrapper[4838]: E1128 09:59:09.561702 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:59:10 crc kubenswrapper[4838]: I1128 09:59:10.561134 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:59:10 crc kubenswrapper[4838]: E1128 09:59:10.561334 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:59:11 crc kubenswrapper[4838]: I1128 09:59:11.561277 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:59:11 crc kubenswrapper[4838]: I1128 09:59:11.561305 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:59:11 crc kubenswrapper[4838]: E1128 09:59:11.561396 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:59:11 crc kubenswrapper[4838]: I1128 09:59:11.561464 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:59:11 crc kubenswrapper[4838]: E1128 09:59:11.561544 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:59:11 crc kubenswrapper[4838]: E1128 09:59:11.561627 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:59:12 crc kubenswrapper[4838]: I1128 09:59:12.562144 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:59:12 crc kubenswrapper[4838]: E1128 09:59:12.562341 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:59:13 crc kubenswrapper[4838]: I1128 09:59:13.561559 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:59:13 crc kubenswrapper[4838]: I1128 09:59:13.561616 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:59:13 crc kubenswrapper[4838]: I1128 09:59:13.561797 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:59:13 crc kubenswrapper[4838]: E1128 09:59:13.561910 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:59:13 crc kubenswrapper[4838]: E1128 09:59:13.562106 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:59:13 crc kubenswrapper[4838]: E1128 09:59:13.562232 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:59:13 crc kubenswrapper[4838]: E1128 09:59:13.663766 4838 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 09:59:14 crc kubenswrapper[4838]: I1128 09:59:14.561225 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:59:14 crc kubenswrapper[4838]: E1128 09:59:14.561418 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:59:15 crc kubenswrapper[4838]: I1128 09:59:15.561402 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:59:15 crc kubenswrapper[4838]: I1128 09:59:15.561516 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:59:15 crc kubenswrapper[4838]: I1128 09:59:15.561410 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:59:15 crc kubenswrapper[4838]: E1128 09:59:15.561581 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:59:15 crc kubenswrapper[4838]: E1128 09:59:15.561701 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:59:15 crc kubenswrapper[4838]: E1128 09:59:15.561866 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:59:16 crc kubenswrapper[4838]: I1128 09:59:16.562048 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:59:16 crc kubenswrapper[4838]: E1128 09:59:16.562742 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:59:17 crc kubenswrapper[4838]: I1128 09:59:17.561675 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:59:17 crc kubenswrapper[4838]: I1128 09:59:17.561804 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:59:17 crc kubenswrapper[4838]: E1128 09:59:17.561907 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:59:17 crc kubenswrapper[4838]: I1128 09:59:17.561683 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:59:17 crc kubenswrapper[4838]: E1128 09:59:17.562010 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:59:17 crc kubenswrapper[4838]: E1128 09:59:17.562088 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.561429 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:59:18 crc kubenswrapper[4838]: E1128 09:59:18.563646 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.571235 4838 scope.go:117] "RemoveContainer" containerID="71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679" Nov 28 09:59:18 crc kubenswrapper[4838]: E1128 09:59:18.571610 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-gmhsj_openshift-ovn-kubernetes(41b01f7d-5c75-49de-86f7-87e04bf71194)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.589845 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.589930 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPat
h\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.607437 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.631669 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71617073a4d76318049cb634cb4ee2135cd2c25b
c3d3d6285eb69baf1fae9679\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://973017e2b3a339f98d3439e256810a83b425061d827e74edd127aee640a9c0eb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:38Z\\\",\\\"message\\\":\\\"l\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 09:58:37.612073 6708 services_controller.go:452] Built service openshift-console/downloads per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612094 6708 services_controller.go:452] Built service openshift-machine-api/machine-api-operator-machine-webhook per-node LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612103 6708 services_controller.go:453] Built service openshift-console/downloads template LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612134 6708 services_controller.go:453] Built service openshift-machine-api/machine-api-operator-machine-webhook template LB for network=default: []services.LB{}\\\\nI1128 09:58:37.612147 6708 services_controller.go:454] Service openshift-console/downloads for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nF1128 09:58:37.612098 6708 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node netwo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:59:06Z\\\",\\\"message\\\":\\\"l\\\\nI1128 09:59:06.154371 7040 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 09:59:06.154405 7040 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 09:59:06.154445 7040 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1128 09:59:06.154457 7040 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1128 09:59:06.154488 7040 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 09:59:06.154507 7040 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 09:59:06.154517 7040 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 09:59:06.154549 7040 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 09:59:06.154572 7040 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 09:59:06.154581 7040 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 09:59:06.154603 7040 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 09:59:06.154624 7040 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 09:59:06.154633 7040 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 09:59:06.154634 7040 factory.go:656] 
Stopping watch factory\\\\nI1128 09:59:06.154640 7040 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 09:59:06.154652 7040 ovnkube.go:599] Stopped ovnkube\\\\nI1128 0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:59:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\
\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.649238 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:18 crc kubenswrapper[4838]: E1128 09:59:18.664657 4838 kubelet.go:2916] "Container runtime 
network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.665193 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.689347 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.706335 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.722887 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.739454 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"
},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.757549 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.782221 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://658172db64e44d752eb06fe2788dda717db3fc2e672b073a2bdf159a16fd901f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.797881 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.813802 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.834476 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.853162 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.873709 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://262d73384f8aa0c5e8405e70d091fbc0003217ee2c08a4776048649b9a6eda59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:51Z\\\",\\\"message\\\":\\\"2025-11-28T09:58:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_42560742-ad80-4917-aa9d-aab5fda94562\\\\n2025-11-28T09:58:06+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_42560742-ad80-4917-aa9d-aab5fda94562 to /host/opt/cni/bin/\\\\n2025-11-28T09:58:06Z [verbose] multus-daemon started\\\\n2025-11-28T09:58:06Z [verbose] Readiness Indicator file check\\\\n2025-11-28T09:58:51Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.893387 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 
09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.912930 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de645de9-a5cd-4075-8bfd-402a619ea73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17e965aad7643d62c651c1e652be45bd914cfe3f14a0a6f43e4e4376b4cb7be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.931277 4838 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.948897 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.970100 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://658172db64e44d752eb06fe2788dda717db3fc2e672b073a2bdf159a16fd901f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.983392 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:18 crc kubenswrapper[4838]: I1128 09:59:18.999804 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:18Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.015552 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de645de9-a5cd-4075-8bfd-402a619ea73f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17e965aad7643d62c651c1e652be45bd914cfe3f14a0a6f43e4e4376b4cb7be0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40b6222c83e4141c5d286efddc1b65ef732c5cadda9cebdd8d9ee114bf2eb533\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.037088 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.058551 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4rv9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"051f7e1c-2d47-4be9-bbd5-14feec16eb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://262d73384f8aa0c5e8405e70d091fbc0003217ee2c08a4776048649b9a6eda59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:58:51Z\\\",\\\"message\\\":\\\"2025-11-28T09:58:06+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_42560742-ad80-4917-aa9d-aab5fda94562\\\\n2025-11-28T09:58:06+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_42560742-ad80-4917-aa9d-aab5fda94562 to /host/opt/cni/bin/\\\\n2025-11-28T09:58:06Z [verbose] multus-daemon started\\\\n2025-11-28T09:58:06Z [verbose] Readiness Indicator file check\\\\n2025-11-28T09:58:51Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szqtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4rv9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.084936 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ebace5c6-6ca4-48ff-9c50-c6b769d599b5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://572204db3ac1da6b232430bf06fb87e94638e151ea0edc2f8b111deb7d82c58c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efa96991cdc29f4f075bfd7751f620c7b091510ea68d577b243811ddd4140cec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8pxnw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-tnclp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:19Z is after 2025-08-24T17:21:41Z" Nov 28 
09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.120261 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42be35de-5c72-4cc2-a5e4-fb7872425cdf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a39765f9493a3a9454db77d07855624ff5645e9dbd898e6dcb880d7a01a8c42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://426741a23e7b1b9fae6769b581c0d44694f957b0da985476923801395fad082f\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:46Z\\\",\\\"message\\\":\\\"+ timeout 3m /bin/bash -exuo pipefail -c 'while [ -n \\\\\\\"$(ss -Htanop \\\\\\\\( sport = 10357 \\\\\\\\))\\\\\\\" ]; do sleep 1; done'\\\\n++ ss -Htanop '(' sport = 10357 ')'\\\\n+ '[' -n '' ']'\\\\n+ exec cluster-policy-controller start --config=/etc/kubernetes/static-pod-resources/configmaps/cluster-policy-controller-config/config.yaml --kubeconfig=/etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig --namespace=openshift-kube-controller-manager -v=2\\\\nI1128 09:57:12.554377 1 leaderelection.go:121] The leader election gives 4 retries and allows for 30s of clock skew. The kube-apiserver downtime tolerance is 78s. Worst non-graceful lease acquisition is 2m43s. 
Worst graceful lease acquisition is {26s}.\\\\nI1128 09:57:12.555793 1 observer_polling.go:159] Starting file observer\\\\nI1128 09:57:12.567187 1 builder.go:298] cluster-policy-controller version 4.18.0-202501230001.p0.g5fd8525.assembly.stream.el9-5fd8525-5fd852525909ce6eab52972ba9ce8fcf56528eb9\\\\nI1128 09:57:12.568976 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.crt::/etc/kubernetes/static-pod-resources/secrets/serving-cert/tls.key\\\\\\\"\\\\nI1128 09:57:45.000857 1 cmd.go:138] Received SIGTERM or SIGINT signal, shutting down controller.\\\\nF1128 09:57:46.203931 1 cmd.go:179] failed checking apiserver connectivity: client rate limiter Wait returned an error: context deadline exceeded\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:10Z\\\"}},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fab4ce68cf732b3e6b32f68e84805013d646a9cbd6d5c55ea3d2f41a7f9db83d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2da93de71b5896e3c9ba192df2896b946b1376fefe2a87cf21adb87ea665be04\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.144756 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1630b1c6-63b5-4481-a711-0485765d37e3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:57:35Z\\\",\\\"message\\\":\\\"rpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.193159 13 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.410371 13 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"192.168.126.11:2379\\\\\\\", ServerName: \\\\\\\"192.168.126.11:2379\\\\\\\", }. 
Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp 192.168.126.11:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.766067 13 logging.go:55] [core] [Channel #7 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:30.792318 13 logging.go:55] [core] [Channel #2 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: Error while dialing: dial tcp [::1]:2379: connect: connection refused\\\\\\\"\\\\nW1128 09:57:34.548830 13 logging.go:55] [core] [Channel #1 SubChannel #6]grpc: addrConn.createTransport failed to connect to {Addr: \\\\\\\"localhost:2379\\\\\\\", ServerName: \\\\\\\"localhost:2379\\\\\\\", }. Err: connection error: desc = \\\\\\\"transport: authentication handshake failed: context canceled\\\\\\\"\\\\nE1128 09:57:34.559534 13 run.go:72] \\\\\\\"command failed\\\\\\\" err=\\\\\\\"context deadline exceeded\\\\\\\"\\\\nI1128 09:57:34.572762 1 main.go:235] Termination finished with exit code 1\\\\nI1128 09:57:34.572819 1 main.go:188] Deleting termination lock file \\\\\\\"/var/log/kube-apiserver/.terminating\\\\\\\"\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPat
h\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"message\\\":\\\"le observer\\\\nW1128 09:57:46.202169 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1128 09:57:46.202478 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 09:57:46.203984 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2688063589/tls.crt::/tmp/serving-cert-2688063589/tls.key\\\\\\\"\\\\nI1128 09:57:46.517349 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 09:57:46.756626 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 09:57:46.756681 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 09:57:46.756806 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 09:57:46.756825 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 09:57:46.768610 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1128 09:57:46.768654 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1128 09:57:46.768660 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768697 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 09:57:46.768707 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 09:57:46.768714 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 09:57:46.768746 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 09:57:46.768752 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1128 09:57:46.772242 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.179118 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d18a643d8d8a58731e53993b40c94bb51ff9c35242f768090dc737cac26503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.206424 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41b01f7d-5c75-49de-86f7-87e04bf71194\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71617073a4d76318049cb634cb4ee2135cd2c25b
c3d3d6285eb69baf1fae9679\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T09:59:06Z\\\",\\\"message\\\":\\\"l\\\\nI1128 09:59:06.154371 7040 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 09:59:06.154405 7040 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 09:59:06.154445 7040 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1128 09:59:06.154457 7040 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1128 09:59:06.154488 7040 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 09:59:06.154507 7040 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 09:59:06.154517 7040 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 09:59:06.154549 7040 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1128 09:59:06.154572 7040 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 09:59:06.154581 7040 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 09:59:06.154603 7040 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 09:59:06.154624 7040 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 09:59:06.154633 7040 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 09:59:06.154634 7040 factory.go:656] Stopping watch factory\\\\nI1128 09:59:06.154640 7040 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 09:59:06.154652 7040 ovnkube.go:599] Stopped ovnkube\\\\nI1128 0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T09:59:04Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-gmhsj_openshift-ovn-kubernetes(41b01f7d-5c75-49de-86f7-87e04bf71194)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-svlft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-gmhsj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.221320 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5c3daa53-8c4e-4e30-aeba-146602dd45cd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d582c5c17a21e943db9e4af274b6cb31d7551e545039aca20a767b35e2ca5040\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9dmrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-5dxdd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.237936 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"faf44892-fdd2-4b45-8772-20049c555d3b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8f8f5484d008289a9e34ceaffd3cf2582565e7265003b0a6a913fe424760fc65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://496ac21c6d7e650f191b3bc29ab676bab6ba40727c5ac4d517833ef9a115ae07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://698aacf6e94caf2da7095c89716d63d935ff60d95cb91c9a39dfe9282cbba005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8352058616fa4ed90ca907e547bad2201d9aa696330f1eb8434c3c3d54d71d89\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.261330 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d408ad62-42e1-41dc-88ef-3969e2b7d6cc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4546745e01db79b4e5c22e32cc6c0c0290159bc97182b87319e4311deb64c031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf37cdbd396192c6d67f546d27be521dafa2258de2928d497c13c9fe4ee53d13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://610cdc4f3b3598ee593b3bbffdb4df63cc98c1f6a56602b357471acb90ad3add\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac22cb890363e5cedeb87067d4a8b7ba53c3cc547f2c133472a373795ca79cda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://437bbddaf475f73991f0b8de425637a7b4baf931244bbb84f64e41aec2025064\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33c04d931c3928e04948b6939ccc80fc0c50e2a3af20ab6617bf8c994ad4cbd9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be
8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33c04d931c3928e04948b6939ccc80fc0c50e2a3af20ab6617bf8c994ad4cbd9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://70d608e317f201a7280cec671bea1a0496f7a68e7caa8925f17ff61f9addf9c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70d608e317f201a7280cec671bea1a0496f7a68e7caa8925f17ff61f9addf9c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:12Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8ab9a9d79738e635e5aaffb8063f01f042d429c0323078dc8aab6bdb886927a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8ab9a9d79738e635e5aaffb8063f01f042d429c0323078dc8aab6bdb886927a3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:57:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:57:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:08Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.279232 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.293866 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.305003 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tj8hl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbb3c60a-bf9f-4a62-9310-30898e42be4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e6fe3f1cbc53f02e2556c5fb44cffcf13330c3240a0ff10a8471080466416bd1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:57:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c2jkq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126
.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tj8hl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.561556 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.561570 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.562703 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:59:19 crc kubenswrapper[4838]: E1128 09:59:19.562983 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:59:19 crc kubenswrapper[4838]: E1128 09:59:19.563134 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:59:19 crc kubenswrapper[4838]: E1128 09:59:19.563282 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.838235 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.838312 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.838328 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.838354 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.838375 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:19Z","lastTransitionTime":"2025-11-28T09:59:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:19 crc kubenswrapper[4838]: E1128 09:59:19.858781 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.864238 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.864471 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.864611 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.864786 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.864927 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:19Z","lastTransitionTime":"2025-11-28T09:59:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:19 crc kubenswrapper[4838]: E1128 09:59:19.887632 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.893393 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.893595 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.893763 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.893935 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.894062 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:19Z","lastTransitionTime":"2025-11-28T09:59:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:19 crc kubenswrapper[4838]: E1128 09:59:19.916777 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.921967 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.922215 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.922403 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.922575 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.922782 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:19Z","lastTransitionTime":"2025-11-28T09:59:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:19 crc kubenswrapper[4838]: E1128 09:59:19.937852 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.942175 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.942376 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.942666 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.942853 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 09:59:19 crc kubenswrapper[4838]: I1128 09:59:19.942998 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:19Z","lastTransitionTime":"2025-11-28T09:59:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 09:59:19 crc kubenswrapper[4838]: E1128 09:59:19.957012 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T09:59:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2d884793-8973-45d8-9335-b721f6accbac\\\",\\\"systemUUID\\\":\\\"e14391b4-beaf-4b9f-9de4-e3bbde3f3327\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:19Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:19 crc kubenswrapper[4838]: E1128 09:59:19.957256 4838 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 09:59:20 crc kubenswrapper[4838]: I1128 09:59:20.562090 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:59:20 crc kubenswrapper[4838]: E1128 09:59:20.562288 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:59:21 crc kubenswrapper[4838]: I1128 09:59:21.561419 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:59:21 crc kubenswrapper[4838]: I1128 09:59:21.561458 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:59:21 crc kubenswrapper[4838]: I1128 09:59:21.561810 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:59:21 crc kubenswrapper[4838]: E1128 09:59:21.561991 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:59:21 crc kubenswrapper[4838]: E1128 09:59:21.562246 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:59:21 crc kubenswrapper[4838]: E1128 09:59:21.562427 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:59:22 crc kubenswrapper[4838]: I1128 09:59:22.561833 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:59:22 crc kubenswrapper[4838]: E1128 09:59:22.562448 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:59:23 crc kubenswrapper[4838]: I1128 09:59:23.561083 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:59:23 crc kubenswrapper[4838]: I1128 09:59:23.561137 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:59:23 crc kubenswrapper[4838]: I1128 09:59:23.561085 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:59:23 crc kubenswrapper[4838]: E1128 09:59:23.561324 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:59:23 crc kubenswrapper[4838]: E1128 09:59:23.561488 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:59:23 crc kubenswrapper[4838]: E1128 09:59:23.561593 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:59:23 crc kubenswrapper[4838]: E1128 09:59:23.666249 4838 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 09:59:24 crc kubenswrapper[4838]: I1128 09:59:24.561941 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:59:24 crc kubenswrapper[4838]: E1128 09:59:24.562128 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:59:25 crc kubenswrapper[4838]: I1128 09:59:25.561556 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:59:25 crc kubenswrapper[4838]: I1128 09:59:25.561620 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:59:25 crc kubenswrapper[4838]: E1128 09:59:25.561825 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:59:25 crc kubenswrapper[4838]: I1128 09:59:25.562042 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:59:25 crc kubenswrapper[4838]: E1128 09:59:25.562147 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:59:25 crc kubenswrapper[4838]: E1128 09:59:25.562275 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:59:26 crc kubenswrapper[4838]: I1128 09:59:26.561831 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:59:26 crc kubenswrapper[4838]: E1128 09:59:26.561993 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:59:27 crc kubenswrapper[4838]: I1128 09:59:27.561158 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:59:27 crc kubenswrapper[4838]: I1128 09:59:27.561201 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:59:27 crc kubenswrapper[4838]: I1128 09:59:27.561173 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:59:27 crc kubenswrapper[4838]: E1128 09:59:27.561542 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:59:27 crc kubenswrapper[4838]: E1128 09:59:27.561670 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:59:27 crc kubenswrapper[4838]: E1128 09:59:27.561841 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:59:28 crc kubenswrapper[4838]: I1128 09:59:28.562128 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:59:28 crc kubenswrapper[4838]: E1128 09:59:28.563272 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:59:28 crc kubenswrapper[4838]: I1128 09:59:28.589708 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05cb23094534a9fdebbe33d8d34a79412ee49437c25e185c6dfa99384cbf9629\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:28 crc kubenswrapper[4838]: I1128 09:59:28.608945 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3968fb11df6b2265691177838400dcb08e03d330c166dd880b3acfac7ec7938f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91940e5398321649eac2960a545bb4bbe047113c81f152aa55894cabee55006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:28Z is after 2025-08-24T17:21:41Z" Nov 28 09:59:28 crc kubenswrapper[4838]: I1128 09:59:28.630577 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-58mh7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f556bd7-3b15-4d7d-b8e2-4815bb5c9c7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://658172db64e44d752eb06fe2788dda717db3fc2e672b073a2bdf159a16fd901f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0a0fbfb7a81acf63b4deabef68d55dd843092bce1c00c27c127955417bede44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dea8cf8adbb0c946731fdc71c2b0d80ace50b919cf9c776eb70cb6ff36529401\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f717c0691a541e3a8310cd520a96e92144902511c74835a5ef8ba9536cd65657\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8b67d281ed50a87ac086c5ce0bcc082c30a41d80074b638340326c334fa4f0c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4ac7e0a6a8340f45399bb4b576bbcd90d4eee54c571e6d64846c521486a2607\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e3e1a16aa714e8c2b82811d71b1845942712cad84d7dd465c02a3c4478419af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T09:58:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T09:58:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jgs5r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-58mh7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:28Z is after 2025-08-24T17:21:41Z"
Nov 28 09:59:28 crc kubenswrapper[4838]: I1128 09:59:28.646146 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sft2b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200cdb91-cc86-40be-a5b6-30f7b9beba6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:57:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c4f94520c534ecc5a5a92d18b6047c6df131fa7d2a9b8712c021a74c3ffc18ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T09:58:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lpt6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:57:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sft2b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:28Z is after 2025-08-24T17:21:41Z"
Nov 28 09:59:28 crc kubenswrapper[4838]: I1128 09:59:28.663073 4838 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-p69l6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2a223cc8-af33-4e83-8bfc-2676c5700447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T09:58:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h25xx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T09:58:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-p69l6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T09:59:28Z is after 2025-08-24T17:21:41Z"
Nov 28 09:59:28 crc kubenswrapper[4838]: E1128 09:59:28.667772 4838 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 28 09:59:28 crc kubenswrapper[4838]: I1128 09:59:28.692087 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=73.692071843 podStartE2EDuration="1m13.692071843s" podCreationTimestamp="2025-11-28 09:58:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 09:59:28.692044472 +0000 UTC m=+140.391018682" watchObservedRunningTime="2025-11-28 09:59:28.692071843 +0000 UTC m=+140.391046013"
Nov 28 09:59:28 crc kubenswrapper[4838]: I1128 09:59:28.729285 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-4rv9b" podStartSLOduration=99.729266943 podStartE2EDuration="1m39.729266943s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 09:59:28.718795909 +0000 UTC m=+140.417770099" watchObservedRunningTime="2025-11-28 09:59:28.729266943 +0000 UTC m=+140.428241123"
Nov 28 09:59:28 crc kubenswrapper[4838]: I1128 09:59:28.751416 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=96.751401612 podStartE2EDuration="1m36.751401612s" podCreationTimestamp="2025-11-28 09:57:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 09:59:28.751153942 +0000 UTC m=+140.450128142" watchObservedRunningTime="2025-11-28 09:59:28.751401612 +0000 UTC m=+140.450375782"
Nov 28 09:59:28 crc kubenswrapper[4838]: I1128 09:59:28.751531 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-tnclp" podStartSLOduration=99.751527427 podStartE2EDuration="1m39.751527427s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 09:59:28.729871298 +0000 UTC m=+140.428845468" watchObservedRunningTime="2025-11-28 09:59:28.751527427 +0000 UTC m=+140.450501597"
Nov 28 09:59:28 crc kubenswrapper[4838]: I1128 09:59:28.782372 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=90.782356109 podStartE2EDuration="1m30.782356109s" podCreationTimestamp="2025-11-28 09:57:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 09:59:28.769775479 +0000 UTC m=+140.468749659" watchObservedRunningTime="2025-11-28 09:59:28.782356109 +0000 UTC m=+140.481330279"
Nov 28 09:59:28 crc kubenswrapper[4838]: I1128 09:59:28.819208 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podStartSLOduration=99.819189945 podStartE2EDuration="1m39.819189945s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 09:59:28.818661304 +0000 UTC m=+140.517635494" watchObservedRunningTime="2025-11-28 09:59:28.819189945 +0000 UTC m=+140.518164125"
Nov 28 09:59:28 crc kubenswrapper[4838]: I1128 09:59:28.866497 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=10.866480136 podStartE2EDuration="10.866480136s" podCreationTimestamp="2025-11-28 09:59:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 09:59:28.865799069 +0000 UTC m=+140.564773259" watchObservedRunningTime="2025-11-28 09:59:28.866480136 +0000 UTC m=+140.565454306"
Nov 28 09:59:28 crc kubenswrapper[4838]: I1128 09:59:28.867131 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=91.867123543 podStartE2EDuration="1m31.867123543s" podCreationTimestamp="2025-11-28 09:57:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 09:59:28.831033077 +0000 UTC m=+140.530007247" watchObservedRunningTime="2025-11-28 09:59:28.867123543 +0000 UTC m=+140.566097713"
Nov 28 09:59:28 crc kubenswrapper[4838]: I1128 09:59:28.909316 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-tj8hl" podStartSLOduration=99.909293446 podStartE2EDuration="1m39.909293446s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 09:59:28.908279914 +0000 UTC m=+140.607254084" watchObservedRunningTime="2025-11-28 09:59:28.909293446 +0000 UTC m=+140.608267626"
Nov 28 09:59:29 crc kubenswrapper[4838]: I1128 09:59:29.561966 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 09:59:29 crc kubenswrapper[4838]: I1128 09:59:29.562478 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 09:59:29 crc kubenswrapper[4838]: E1128 09:59:29.563467 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 09:59:29 crc kubenswrapper[4838]: E1128 09:59:29.562898 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 09:59:29 crc kubenswrapper[4838]: I1128 09:59:29.562884 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 09:59:29 crc kubenswrapper[4838]: I1128 09:59:29.563977 4838 scope.go:117] "RemoveContainer" containerID="71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679"
Nov 28 09:59:29 crc kubenswrapper[4838]: E1128 09:59:29.564286 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 09:59:29 crc kubenswrapper[4838]: E1128 09:59:29.564770 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-gmhsj_openshift-ovn-kubernetes(41b01f7d-5c75-49de-86f7-87e04bf71194)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.010464 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.010525 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.010544 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.010573 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.010593 4838 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T09:59:30Z","lastTransitionTime":"2025-11-28T09:59:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.063365 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-wpslw"]
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.064037 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wpslw"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.066075 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.066707 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.066949 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.067060 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.139229 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-58mh7" podStartSLOduration=101.139208448 podStartE2EDuration="1m41.139208448s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 09:59:30.138304951 +0000 UTC m=+141.837279161" watchObservedRunningTime="2025-11-28 09:59:30.139208448 +0000 UTC m=+141.838182628"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.172361 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-sft2b" podStartSLOduration=101.172333933 podStartE2EDuration="1m41.172333933s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 09:59:30.156543452 +0000 UTC m=+141.855517662" watchObservedRunningTime="2025-11-28 09:59:30.172333933 +0000 UTC m=+141.871308143"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.191634 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/25e84a77-e762-451f-abc0-0c0fac7bd62b-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-wpslw\" (UID: \"25e84a77-e762-451f-abc0-0c0fac7bd62b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wpslw"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.191674 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/25e84a77-e762-451f-abc0-0c0fac7bd62b-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-wpslw\" (UID: \"25e84a77-e762-451f-abc0-0c0fac7bd62b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wpslw"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.191692 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/25e84a77-e762-451f-abc0-0c0fac7bd62b-service-ca\") pod \"cluster-version-operator-5c965bbfc6-wpslw\" (UID: \"25e84a77-e762-451f-abc0-0c0fac7bd62b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wpslw"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.191737 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/25e84a77-e762-451f-abc0-0c0fac7bd62b-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-wpslw\" (UID: \"25e84a77-e762-451f-abc0-0c0fac7bd62b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wpslw"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.191778 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25e84a77-e762-451f-abc0-0c0fac7bd62b-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-wpslw\" (UID: \"25e84a77-e762-451f-abc0-0c0fac7bd62b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wpslw"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.298741 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25e84a77-e762-451f-abc0-0c0fac7bd62b-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-wpslw\" (UID: \"25e84a77-e762-451f-abc0-0c0fac7bd62b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wpslw"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.298934 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/25e84a77-e762-451f-abc0-0c0fac7bd62b-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-wpslw\" (UID: \"25e84a77-e762-451f-abc0-0c0fac7bd62b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wpslw"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.298971 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/25e84a77-e762-451f-abc0-0c0fac7bd62b-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-wpslw\" (UID: \"25e84a77-e762-451f-abc0-0c0fac7bd62b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wpslw"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.299002 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/25e84a77-e762-451f-abc0-0c0fac7bd62b-service-ca\") pod \"cluster-version-operator-5c965bbfc6-wpslw\" (UID: \"25e84a77-e762-451f-abc0-0c0fac7bd62b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wpslw"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.299059 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/25e84a77-e762-451f-abc0-0c0fac7bd62b-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-wpslw\" (UID: \"25e84a77-e762-451f-abc0-0c0fac7bd62b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wpslw"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.299208 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/25e84a77-e762-451f-abc0-0c0fac7bd62b-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-wpslw\" (UID: \"25e84a77-e762-451f-abc0-0c0fac7bd62b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wpslw"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.299213 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/25e84a77-e762-451f-abc0-0c0fac7bd62b-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-wpslw\" (UID: \"25e84a77-e762-451f-abc0-0c0fac7bd62b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wpslw"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.300003 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/25e84a77-e762-451f-abc0-0c0fac7bd62b-service-ca\") pod \"cluster-version-operator-5c965bbfc6-wpslw\" (UID: \"25e84a77-e762-451f-abc0-0c0fac7bd62b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wpslw"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.305625 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25e84a77-e762-451f-abc0-0c0fac7bd62b-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-wpslw\" (UID: \"25e84a77-e762-451f-abc0-0c0fac7bd62b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wpslw"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.322657 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/25e84a77-e762-451f-abc0-0c0fac7bd62b-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-wpslw\" (UID: \"25e84a77-e762-451f-abc0-0c0fac7bd62b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wpslw"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.386627 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wpslw"
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.476699 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wpslw" event={"ID":"25e84a77-e762-451f-abc0-0c0fac7bd62b","Type":"ContainerStarted","Data":"8ce2543f5904caf3982f7418f3b238117b13d76d07f32957a4889bdda5b892e8"}
Nov 28 09:59:30 crc kubenswrapper[4838]: I1128 09:59:30.561286 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6"
Nov 28 09:59:30 crc kubenswrapper[4838]: E1128 09:59:30.561582 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447"
Nov 28 09:59:31 crc kubenswrapper[4838]: I1128 09:59:31.483545 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wpslw" event={"ID":"25e84a77-e762-451f-abc0-0c0fac7bd62b","Type":"ContainerStarted","Data":"f865177e48820601c34db39ceac2df5ccdb1130ddd97c9f2b2a830e7409a896c"}
Nov 28 09:59:31 crc kubenswrapper[4838]: I1128 09:59:31.508548 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-wpslw" podStartSLOduration=102.508525073 podStartE2EDuration="1m42.508525073s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 09:59:31.507327464 +0000 UTC m=+143.206301684" watchObservedRunningTime="2025-11-28 09:59:31.508525073 +0000 UTC m=+143.207499273"
Nov 28 09:59:31 crc kubenswrapper[4838]: I1128 09:59:31.561928 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 09:59:31 crc kubenswrapper[4838]: I1128 09:59:31.561928 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 09:59:31 crc kubenswrapper[4838]: I1128 09:59:31.561954 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 09:59:31 crc kubenswrapper[4838]: E1128 09:59:31.562133 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 09:59:31 crc kubenswrapper[4838]: E1128 09:59:31.562285 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 09:59:31 crc kubenswrapper[4838]: E1128 09:59:31.562502 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 09:59:32 crc kubenswrapper[4838]: I1128 09:59:32.561703 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6"
Nov 28 09:59:32 crc kubenswrapper[4838]: E1128 09:59:32.561903 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447"
Nov 28 09:59:33 crc kubenswrapper[4838]: I1128 09:59:33.561168 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 09:59:33 crc kubenswrapper[4838]: I1128 09:59:33.561212 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 09:59:33 crc kubenswrapper[4838]: I1128 09:59:33.561381 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 09:59:33 crc kubenswrapper[4838]: E1128 09:59:33.561538 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 09:59:33 crc kubenswrapper[4838]: E1128 09:59:33.561697 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 09:59:33 crc kubenswrapper[4838]: E1128 09:59:33.561987 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 09:59:33 crc kubenswrapper[4838]: E1128 09:59:33.669657 4838 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 28 09:59:34 crc kubenswrapper[4838]: I1128 09:59:34.564117 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6"
Nov 28 09:59:34 crc kubenswrapper[4838]: E1128 09:59:34.564321 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447"
Nov 28 09:59:35 crc kubenswrapper[4838]: I1128 09:59:35.561694 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 09:59:35 crc kubenswrapper[4838]: I1128 09:59:35.561768 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 09:59:35 crc kubenswrapper[4838]: I1128 09:59:35.561839 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 09:59:35 crc kubenswrapper[4838]: E1128 09:59:35.562002 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 09:59:35 crc kubenswrapper[4838]: E1128 09:59:35.562145 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 09:59:35 crc kubenswrapper[4838]: E1128 09:59:35.562678 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 09:59:36 crc kubenswrapper[4838]: I1128 09:59:36.562145 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6"
Nov 28 09:59:36 crc kubenswrapper[4838]: E1128 09:59:36.562294 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447"
Nov 28 09:59:37 crc kubenswrapper[4838]: I1128 09:59:37.561708 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 09:59:37 crc kubenswrapper[4838]: I1128 09:59:37.561816 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 09:59:37 crc kubenswrapper[4838]: I1128 09:59:37.561877 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 09:59:37 crc kubenswrapper[4838]: E1128 09:59:37.561926 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 09:59:37 crc kubenswrapper[4838]: E1128 09:59:37.562085 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 09:59:37 crc kubenswrapper[4838]: E1128 09:59:37.562250 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 09:59:38 crc kubenswrapper[4838]: I1128 09:59:38.508488 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4rv9b_051f7e1c-2d47-4be9-bbd5-14feec16eb16/kube-multus/1.log"
Nov 28 09:59:38 crc kubenswrapper[4838]: I1128 09:59:38.509184 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4rv9b_051f7e1c-2d47-4be9-bbd5-14feec16eb16/kube-multus/0.log"
Nov 28 09:59:38 crc kubenswrapper[4838]: I1128 09:59:38.509263 4838 generic.go:334] "Generic (PLEG): container finished" podID="051f7e1c-2d47-4be9-bbd5-14feec16eb16" containerID="262d73384f8aa0c5e8405e70d091fbc0003217ee2c08a4776048649b9a6eda59" exitCode=1
Nov 28 09:59:38 crc kubenswrapper[4838]: I1128 09:59:38.509307 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4rv9b" event={"ID":"051f7e1c-2d47-4be9-bbd5-14feec16eb16","Type":"ContainerDied","Data":"262d73384f8aa0c5e8405e70d091fbc0003217ee2c08a4776048649b9a6eda59"}
Nov 28 09:59:38 crc kubenswrapper[4838]: I1128 09:59:38.509355 4838 scope.go:117] "RemoveContainer" containerID="f641ae0677a50fbc7b20fac7bb5c567f031af1b6a06fbe9e965091efa4ee4ec3"
Nov 28 09:59:38 crc kubenswrapper[4838]: I1128 09:59:38.509878 4838 scope.go:117] "RemoveContainer" containerID="262d73384f8aa0c5e8405e70d091fbc0003217ee2c08a4776048649b9a6eda59"
Nov 28 09:59:38 crc kubenswrapper[4838]: E1128 09:59:38.510076 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-4rv9b_openshift-multus(051f7e1c-2d47-4be9-bbd5-14feec16eb16)\"" pod="openshift-multus/multus-4rv9b" podUID="051f7e1c-2d47-4be9-bbd5-14feec16eb16"
Nov 28 09:59:38 crc kubenswrapper[4838]: I1128 09:59:38.561888 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6"
Nov 28 09:59:38 crc kubenswrapper[4838]: E1128 09:59:38.562387 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447"
Nov 28 09:59:38 crc kubenswrapper[4838]: E1128 09:59:38.670146 4838 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 28 09:59:39 crc kubenswrapper[4838]: I1128 09:59:39.518496 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4rv9b_051f7e1c-2d47-4be9-bbd5-14feec16eb16/kube-multus/1.log"
Nov 28 09:59:39 crc kubenswrapper[4838]: I1128 09:59:39.561351 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 09:59:39 crc kubenswrapper[4838]: I1128 09:59:39.561450 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 09:59:39 crc kubenswrapper[4838]: E1128 09:59:39.561519 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 09:59:39 crc kubenswrapper[4838]: I1128 09:59:39.561572 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 09:59:39 crc kubenswrapper[4838]: E1128 09:59:39.561787 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 09:59:39 crc kubenswrapper[4838]: E1128 09:59:39.561940 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 09:59:40 crc kubenswrapper[4838]: I1128 09:59:40.562052 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6"
Nov 28 09:59:40 crc kubenswrapper[4838]: E1128 09:59:40.562303 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447"
Nov 28 09:59:40 crc kubenswrapper[4838]: I1128 09:59:40.563638 4838 scope.go:117] "RemoveContainer" containerID="71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679"
Nov 28 09:59:40 crc kubenswrapper[4838]: E1128 09:59:40.563982 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-gmhsj_openshift-ovn-kubernetes(41b01f7d-5c75-49de-86f7-87e04bf71194)\"" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194"
Nov 28 09:59:41 crc kubenswrapper[4838]: I1128 09:59:41.561264 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 09:59:41 crc kubenswrapper[4838]: I1128 09:59:41.561360 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 09:59:41 crc kubenswrapper[4838]: I1128 09:59:41.561277 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 09:59:41 crc kubenswrapper[4838]: E1128 09:59:41.561461 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 09:59:41 crc kubenswrapper[4838]: E1128 09:59:41.561635 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 09:59:41 crc kubenswrapper[4838]: E1128 09:59:41.561814 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 09:59:42 crc kubenswrapper[4838]: I1128 09:59:42.561861 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6"
Nov 28 09:59:42 crc kubenswrapper[4838]: E1128 09:59:42.562060 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447"
Nov 28 09:59:43 crc kubenswrapper[4838]: I1128 09:59:43.561829 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 09:59:43 crc kubenswrapper[4838]: I1128 09:59:43.561887 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 09:59:43 crc kubenswrapper[4838]: I1128 09:59:43.561829 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 09:59:43 crc kubenswrapper[4838]: E1128 09:59:43.562067 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 09:59:43 crc kubenswrapper[4838]: E1128 09:59:43.562197 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 09:59:43 crc kubenswrapper[4838]: E1128 09:59:43.562321 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 09:59:43 crc kubenswrapper[4838]: E1128 09:59:43.745056 4838 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 28 09:59:44 crc kubenswrapper[4838]: I1128 09:59:44.561596 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6"
Nov 28 09:59:44 crc kubenswrapper[4838]: E1128 09:59:44.561838 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447"
Nov 28 09:59:45 crc kubenswrapper[4838]: I1128 09:59:45.561116 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 09:59:45 crc kubenswrapper[4838]: I1128 09:59:45.561193 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 09:59:45 crc kubenswrapper[4838]: I1128 09:59:45.561157 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 09:59:45 crc kubenswrapper[4838]: E1128 09:59:45.561358 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 09:59:45 crc kubenswrapper[4838]: E1128 09:59:45.561552 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 09:59:45 crc kubenswrapper[4838]: E1128 09:59:45.561821 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 09:59:46 crc kubenswrapper[4838]: I1128 09:59:46.561385 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6"
Nov 28 09:59:46 crc kubenswrapper[4838]: E1128 09:59:46.561659 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447"
Nov 28 09:59:47 crc kubenswrapper[4838]: I1128 09:59:47.561449 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 09:59:47 crc kubenswrapper[4838]: E1128 09:59:47.561828 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 09:59:47 crc kubenswrapper[4838]: I1128 09:59:47.561502 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 09:59:47 crc kubenswrapper[4838]: I1128 09:59:47.561507 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 09:59:47 crc kubenswrapper[4838]: E1128 09:59:47.562017 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 09:59:47 crc kubenswrapper[4838]: E1128 09:59:47.562166 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 09:59:48 crc kubenswrapper[4838]: I1128 09:59:48.561938 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6"
Nov 28 09:59:48 crc kubenswrapper[4838]: E1128 09:59:48.562131 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447"
Nov 28 09:59:48 crc kubenswrapper[4838]: E1128 09:59:48.746399 4838 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 28 09:59:49 crc kubenswrapper[4838]: I1128 09:59:49.561804 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 09:59:49 crc kubenswrapper[4838]: I1128 09:59:49.561850 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 09:59:49 crc kubenswrapper[4838]: E1128 09:59:49.561941 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 09:59:49 crc kubenswrapper[4838]: E1128 09:59:49.562018 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 09:59:49 crc kubenswrapper[4838]: I1128 09:59:49.562870 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 09:59:49 crc kubenswrapper[4838]: E1128 09:59:49.563094 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 09:59:50 crc kubenswrapper[4838]: I1128 09:59:50.561552 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6"
Nov 28 09:59:50 crc kubenswrapper[4838]: E1128 09:59:50.561827 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447"
Nov 28 09:59:51 crc kubenswrapper[4838]: I1128 09:59:51.561299 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 09:59:51 crc kubenswrapper[4838]: I1128 09:59:51.561330 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 09:59:51 crc kubenswrapper[4838]: E1128 09:59:51.561440 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 09:59:51 crc kubenswrapper[4838]: I1128 09:59:51.561449 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 09:59:51 crc kubenswrapper[4838]: E1128 09:59:51.561640 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 09:59:51 crc kubenswrapper[4838]: E1128 09:59:51.562267 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 09:59:51 crc kubenswrapper[4838]: I1128 09:59:51.562823 4838 scope.go:117] "RemoveContainer" containerID="71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679"
Nov 28 09:59:52 crc kubenswrapper[4838]: I1128 09:59:52.561974 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6"
Nov 28 09:59:52 crc kubenswrapper[4838]: I1128 09:59:52.562128 4838 scope.go:117] "RemoveContainer" containerID="262d73384f8aa0c5e8405e70d091fbc0003217ee2c08a4776048649b9a6eda59"
Nov 28 09:59:52 crc kubenswrapper[4838]: E1128 09:59:52.562229 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447"
Nov 28 09:59:52 crc kubenswrapper[4838]: I1128 09:59:52.566777 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gmhsj_41b01f7d-5c75-49de-86f7-87e04bf71194/ovnkube-controller/3.log"
Nov 28 09:59:52 crc kubenswrapper[4838]: I1128 09:59:52.571350 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerStarted","Data":"7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d"}
Nov 28 09:59:52 crc kubenswrapper[4838]: I1128 09:59:52.572049 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj"
Nov 28 09:59:52 crc kubenswrapper[4838]: I1128 09:59:52.904952 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" podStartSLOduration=123.90492858 podStartE2EDuration="2m3.90492858s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 09:59:52.627269983 +0000 UTC m=+164.326244183" watchObservedRunningTime="2025-11-28 09:59:52.90492858 +0000 UTC m=+164.603902750"
Nov 28 09:59:52 crc kubenswrapper[4838]: I1128 09:59:52.906025 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-p69l6"]
Nov 28 09:59:53 crc kubenswrapper[4838]: I1128 09:59:53.561411 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:59:53 crc kubenswrapper[4838]: E1128 09:59:53.561823 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:59:53 crc kubenswrapper[4838]: I1128 09:59:53.561484 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:59:53 crc kubenswrapper[4838]: I1128 09:59:53.561441 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:59:53 crc kubenswrapper[4838]: E1128 09:59:53.561910 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:59:53 crc kubenswrapper[4838]: E1128 09:59:53.562045 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:59:53 crc kubenswrapper[4838]: I1128 09:59:53.577735 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4rv9b_051f7e1c-2d47-4be9-bbd5-14feec16eb16/kube-multus/1.log" Nov 28 09:59:53 crc kubenswrapper[4838]: I1128 09:59:53.577824 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:59:53 crc kubenswrapper[4838]: E1128 09:59:53.577931 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:59:53 crc kubenswrapper[4838]: I1128 09:59:53.590911 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4rv9b" event={"ID":"051f7e1c-2d47-4be9-bbd5-14feec16eb16","Type":"ContainerStarted","Data":"9c246d0893eb62f4c097d470f8716546d2e91da8aed1554ef64709537d7d377d"} Nov 28 09:59:53 crc kubenswrapper[4838]: E1128 09:59:53.750655 4838 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 09:59:55 crc kubenswrapper[4838]: I1128 09:59:55.561483 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:59:55 crc kubenswrapper[4838]: I1128 09:59:55.561619 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:59:55 crc kubenswrapper[4838]: I1128 09:59:55.562554 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:59:55 crc kubenswrapper[4838]: E1128 09:59:55.562749 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:59:55 crc kubenswrapper[4838]: I1128 09:59:55.562789 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:59:55 crc kubenswrapper[4838]: E1128 09:59:55.562935 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:59:55 crc kubenswrapper[4838]: E1128 09:59:55.563136 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:59:55 crc kubenswrapper[4838]: E1128 09:59:55.563264 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:59:57 crc kubenswrapper[4838]: I1128 09:59:57.562104 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:59:57 crc kubenswrapper[4838]: I1128 09:59:57.562187 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:59:57 crc kubenswrapper[4838]: I1128 09:59:57.562291 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:59:57 crc kubenswrapper[4838]: E1128 09:59:57.562786 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 09:59:57 crc kubenswrapper[4838]: E1128 09:59:57.563040 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 09:59:57 crc kubenswrapper[4838]: I1128 09:59:57.563227 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:59:57 crc kubenswrapper[4838]: E1128 09:59:57.563267 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 09:59:57 crc kubenswrapper[4838]: E1128 09:59:57.563558 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-p69l6" podUID="2a223cc8-af33-4e83-8bfc-2676c5700447" Nov 28 09:59:59 crc kubenswrapper[4838]: I1128 09:59:59.561654 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 09:59:59 crc kubenswrapper[4838]: I1128 09:59:59.561704 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 09:59:59 crc kubenswrapper[4838]: I1128 09:59:59.561767 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 09:59:59 crc kubenswrapper[4838]: I1128 09:59:59.561954 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 09:59:59 crc kubenswrapper[4838]: I1128 09:59:59.564390 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 28 09:59:59 crc kubenswrapper[4838]: I1128 09:59:59.564567 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 28 09:59:59 crc kubenswrapper[4838]: I1128 09:59:59.564945 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 28 09:59:59 crc kubenswrapper[4838]: I1128 09:59:59.565044 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 28 09:59:59 crc kubenswrapper[4838]: I1128 09:59:59.565056 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 28 09:59:59 crc kubenswrapper[4838]: I1128 09:59:59.565117 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.825549 4838 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.871093 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-75j6q"] Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.871901 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.874842 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-25qvk"] Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.875567 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-25qvk" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.877404 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-vn4kw"] Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.877799 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-vn4kw" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.892461 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-z695f"] Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.893526 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-z695f" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.895109 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.895139 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.895227 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.895238 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.895451 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.895504 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.895598 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.895601 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.895674 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.895747 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.895975 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.896188 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.896435 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-qfpq5"] Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.896998 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-qfpq5" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.897478 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-465nq"] Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.897980 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-465nq" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.899782 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.899834 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.899994 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.900244 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.909070 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.911116 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj"] Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.925909 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.926791 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-2w9k9"] Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.927103 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-2w9k9" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.927394 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.928251 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4zn6"] Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.928787 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-hrtmd"] Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.928848 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4zn6" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.929205 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.929210 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq"] Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.929655 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.932678 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.932826 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.933273 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.933488 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.933543 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.933682 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.933707 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.933856 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.933928 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.934062 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.934247 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.934378 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.934546 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.933502 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.934785 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.934960 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.935540 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/6524e1a4-6ca0-4e18-8be8-f7cd56757453-machine-approver-tls\") pod \"machine-approver-56656f9798-465nq\" (UID: \"6524e1a4-6ca0-4e18-8be8-f7cd56757453\") " 
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-465nq" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.935607 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/69a82003-94b3-4aaa-9904-8485cfa5f662-serving-cert\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.935645 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/69a82003-94b3-4aaa-9904-8485cfa5f662-encryption-config\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.935685 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjsxx\" (UniqueName: \"kubernetes.io/projected/ebabbe26-3c09-4a23-8dcc-aed864a3e4a4-kube-api-access-zjsxx\") pod \"downloads-7954f5f757-2w9k9\" (UID: \"ebabbe26-3c09-4a23-8dcc-aed864a3e4a4\") " pod="openshift-console/downloads-7954f5f757-2w9k9" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.935756 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvl79\" (UniqueName: \"kubernetes.io/projected/8c869fc5-3a3b-41e2-8eac-4dc5835be740-kube-api-access-nvl79\") pod \"dns-operator-744455d44c-qfpq5\" (UID: \"8c869fc5-3a3b-41e2-8eac-4dc5835be740\") " pod="openshift-dns-operator/dns-operator-744455d44c-qfpq5" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.935794 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cc483880-bf40-4f4c-bf77-52eb4896bd5b-serving-cert\") pod \"controller-manager-879f6c89f-75j6q\" (UID: \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.935823 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wsnx\" (UniqueName: \"kubernetes.io/projected/488c1f95-482d-4a08-b83d-81b3f08090ab-kube-api-access-9wsnx\") pod \"console-operator-58897d9998-vn4kw\" (UID: \"488c1f95-482d-4a08-b83d-81b3f08090ab\") " pod="openshift-console-operator/console-operator-58897d9998-vn4kw" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.935851 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc483880-bf40-4f4c-bf77-52eb4896bd5b-config\") pod \"controller-manager-879f6c89f-75j6q\" (UID: \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.935883 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a-serving-cert\") pod \"authentication-operator-69f744f599-25qvk\" (UID: \"4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-25qvk" Nov 28 10:00:00 crc 
kubenswrapper[4838]: I1128 10:00:00.935911 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a-service-ca-bundle\") pod \"authentication-operator-69f744f599-25qvk\" (UID: \"4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-25qvk" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.935943 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fdac71d5-8b14-4c59-9d37-345456b26b36-serving-cert\") pod \"openshift-config-operator-7777fb866f-z695f\" (UID: \"fdac71d5-8b14-4c59-9d37-345456b26b36\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-z695f" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.935983 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/69a82003-94b3-4aaa-9904-8485cfa5f662-audit-dir\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936013 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/fdac71d5-8b14-4c59-9d37-345456b26b36-available-featuregates\") pod \"openshift-config-operator-7777fb866f-z695f\" (UID: \"fdac71d5-8b14-4c59-9d37-345456b26b36\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-z695f" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936045 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/488c1f95-482d-4a08-b83d-81b3f08090ab-config\") pod \"console-operator-58897d9998-vn4kw\" (UID: \"488c1f95-482d-4a08-b83d-81b3f08090ab\") " pod="openshift-console-operator/console-operator-58897d9998-vn4kw" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936072 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-25qvk\" (UID: \"4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-25qvk" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936117 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cc483880-bf40-4f4c-bf77-52eb4896bd5b-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-75j6q\" (UID: \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936151 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lk4sw\" (UniqueName: \"kubernetes.io/projected/4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a-kube-api-access-lk4sw\") pod \"authentication-operator-69f744f599-25qvk\" (UID: \"4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a\") " 
pod="openshift-authentication-operator/authentication-operator-69f744f599-25qvk" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936180 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2w9dh\" (UniqueName: \"kubernetes.io/projected/fdac71d5-8b14-4c59-9d37-345456b26b36-kube-api-access-2w9dh\") pod \"openshift-config-operator-7777fb866f-z695f\" (UID: \"fdac71d5-8b14-4c59-9d37-345456b26b36\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-z695f" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936213 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rtp97\" (UniqueName: \"kubernetes.io/projected/6524e1a4-6ca0-4e18-8be8-f7cd56757453-kube-api-access-rtp97\") pod \"machine-approver-56656f9798-465nq\" (UID: \"6524e1a4-6ca0-4e18-8be8-f7cd56757453\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-465nq" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936240 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8c869fc5-3a3b-41e2-8eac-4dc5835be740-metrics-tls\") pod \"dns-operator-744455d44c-qfpq5\" (UID: \"8c869fc5-3a3b-41e2-8eac-4dc5835be740\") " pod="openshift-dns-operator/dns-operator-744455d44c-qfpq5" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936274 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6524e1a4-6ca0-4e18-8be8-f7cd56757453-config\") pod \"machine-approver-56656f9798-465nq\" (UID: \"6524e1a4-6ca0-4e18-8be8-f7cd56757453\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-465nq" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936302 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/69a82003-94b3-4aaa-9904-8485cfa5f662-audit-policies\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936333 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hmmr\" (UniqueName: \"kubernetes.io/projected/69a82003-94b3-4aaa-9904-8485cfa5f662-kube-api-access-7hmmr\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936362 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a-config\") pod \"authentication-operator-69f744f599-25qvk\" (UID: \"4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-25qvk" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936389 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cc483880-bf40-4f4c-bf77-52eb4896bd5b-client-ca\") pod \"controller-manager-879f6c89f-75j6q\" (UID: \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936419 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/488c1f95-482d-4a08-b83d-81b3f08090ab-trusted-ca\") pod \"console-operator-58897d9998-vn4kw\" (UID: \"488c1f95-482d-4a08-b83d-81b3f08090ab\") " pod="openshift-console-operator/console-operator-58897d9998-vn4kw" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936448 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6524e1a4-6ca0-4e18-8be8-f7cd56757453-auth-proxy-config\") pod \"machine-approver-56656f9798-465nq\" (UID: \"6524e1a4-6ca0-4e18-8be8-f7cd56757453\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-465nq" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936476 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/69a82003-94b3-4aaa-9904-8485cfa5f662-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936508 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/69a82003-94b3-4aaa-9904-8485cfa5f662-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936544 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/488c1f95-482d-4a08-b83d-81b3f08090ab-serving-cert\") pod \"console-operator-58897d9998-vn4kw\" (UID: \"488c1f95-482d-4a08-b83d-81b3f08090ab\") " pod="openshift-console-operator/console-operator-58897d9998-vn4kw" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936575 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzrf6\" (UniqueName: \"kubernetes.io/projected/cc483880-bf40-4f4c-bf77-52eb4896bd5b-kube-api-access-fzrf6\") pod \"controller-manager-879f6c89f-75j6q\" (UID: \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936605 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/69a82003-94b3-4aaa-9904-8485cfa5f662-etcd-client\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936753 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936961 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.936981 4838 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lsj7g"] Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.937102 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.937199 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.937296 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.937399 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.937476 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lsj7g" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.937514 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.937627 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.937748 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.937851 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.937933 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.937947 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.938020 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.941570 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.944584 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-bs85m"] Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.946003 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-bs85m" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.947352 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.947549 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.947638 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.947794 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.947956 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.948075 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.948195 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.948325 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.948487 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.948635 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.948761 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nm4jv"] Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.948780 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.949121 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nm4jv" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.952504 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.952620 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.952865 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.952984 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.953147 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.953204 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.953330 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.953509 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.953938 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.954694 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.955497 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-tkw4f"] Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.956022 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-tkw4f" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.956489 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-df5f6"] Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.957099 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.957964 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.958201 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.958527 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.958793 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.959486 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-8b7z5"] Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.959630 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.959904 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.960054 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.959971 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wpfps"] Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.960530 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.960749 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-bs82t"] Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.961635 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.962190 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wpfps" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.963871 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-75j6q"] Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.966022 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.991940 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.997191 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.993182 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.993370 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.993669 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.994800 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.994902 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.994906 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.995088 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.995184 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.995410 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.995459 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.994833 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.995491 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.995954 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.996192 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.996399 4838 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.996467 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.996607 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 28 10:00:00 crc kubenswrapper[4838]: I1128 10:00:00.996809 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.024912 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.025768 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.026547 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-lvtzk"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.027256 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-w7htp"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.027926 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w7htp" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.028292 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-lvtzk" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.030429 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.030562 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fh5z9"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.031121 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fh5z9" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.031393 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9mrm4"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.031905 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9mrm4" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.032486 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.036275 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7vsn9"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.036777 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.036852 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7vsn9" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.036782 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-x4rrn"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.037458 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-5xzrq"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.037772 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/fb0f7dc9-74c6-4031-8edb-7b10c219df34-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-bs85m\" (UID: \"fb0f7dc9-74c6-4031-8edb-7b10c219df34\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bs85m" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.037810 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/69a82003-94b3-4aaa-9904-8485cfa5f662-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.037832 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/488c1f95-482d-4a08-b83d-81b3f08090ab-trusted-ca\") pod \"console-operator-58897d9998-vn4kw\" (UID: \"488c1f95-482d-4a08-b83d-81b3f08090ab\") " pod="openshift-console-operator/console-operator-58897d9998-vn4kw" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.037850 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6524e1a4-6ca0-4e18-8be8-f7cd56757453-auth-proxy-config\") pod \"machine-approver-56656f9798-465nq\" (UID: \"6524e1a4-6ca0-4e18-8be8-f7cd56757453\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-465nq" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.037869 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/74b81264-e855-4198-a063-9ef62eb9ad30-serving-cert\") pod \"route-controller-manager-6576b87f9c-xrgtq\" (UID: \"74b81264-e855-4198-a063-9ef62eb9ad30\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.037893 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/43982f1d-a55d-4870-be0a-c08c63a8e841-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-tkw4f\" (UID: \"43982f1d-a55d-4870-be0a-c08c63a8e841\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-tkw4f" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.037918 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.037944 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/69a82003-94b3-4aaa-9904-8485cfa5f662-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.037963 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/488c1f95-482d-4a08-b83d-81b3f08090ab-serving-cert\") pod \"console-operator-58897d9998-vn4kw\" (UID: \"488c1f95-482d-4a08-b83d-81b3f08090ab\") " pod="openshift-console-operator/console-operator-58897d9998-vn4kw" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.037981 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzrf6\" (UniqueName: \"kubernetes.io/projected/cc483880-bf40-4f4c-bf77-52eb4896bd5b-kube-api-access-fzrf6\") pod \"controller-manager-879f6c89f-75j6q\" (UID: \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.037997 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/910afa09-7086-4695-bd2f-9397ad54ad4f-etcd-service-ca\") pod \"etcd-operator-b45778765-hrtmd\" (UID: \"910afa09-7086-4695-bd2f-9397ad54ad4f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.038011 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/69a82003-94b3-4aaa-9904-8485cfa5f662-etcd-client\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.038027 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.038042 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.038056 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc 
kubenswrapper[4838]: I1128 10:00:01.038070 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.038086 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.038107 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-88g5b\" (UniqueName: \"kubernetes.io/projected/fb0f7dc9-74c6-4031-8edb-7b10c219df34-kube-api-access-88g5b\") pod \"machine-api-operator-5694c8668f-bs85m\" (UID: \"fb0f7dc9-74c6-4031-8edb-7b10c219df34\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bs85m" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.038124 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/6524e1a4-6ca0-4e18-8be8-f7cd56757453-machine-approver-tls\") pod \"machine-approver-56656f9798-465nq\" (UID: \"6524e1a4-6ca0-4e18-8be8-f7cd56757453\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-465nq" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.038138 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/69a82003-94b3-4aaa-9904-8485cfa5f662-serving-cert\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.038150 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5xzrq" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.038151 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/69a82003-94b3-4aaa-9904-8485cfa5f662-encryption-config\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.039087 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.039118 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjsxx\" (UniqueName: \"kubernetes.io/projected/ebabbe26-3c09-4a23-8dcc-aed864a3e4a4-kube-api-access-zjsxx\") pod \"downloads-7954f5f757-2w9k9\" (UID: \"ebabbe26-3c09-4a23-8dcc-aed864a3e4a4\") " pod="openshift-console/downloads-7954f5f757-2w9k9" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.039140 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/910afa09-7086-4695-bd2f-9397ad54ad4f-serving-cert\") pod \"etcd-operator-b45778765-hrtmd\" (UID: \"910afa09-7086-4695-bd2f-9397ad54ad4f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.039169 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/fb0f7dc9-74c6-4031-8edb-7b10c219df34-images\") pod \"machine-api-operator-5694c8668f-bs85m\" (UID: \"fb0f7dc9-74c6-4031-8edb-7b10c219df34\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bs85m" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.039184 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.039203 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvl79\" (UniqueName: \"kubernetes.io/projected/8c869fc5-3a3b-41e2-8eac-4dc5835be740-kube-api-access-nvl79\") pod \"dns-operator-744455d44c-qfpq5\" (UID: \"8c869fc5-3a3b-41e2-8eac-4dc5835be740\") " pod="openshift-dns-operator/dns-operator-744455d44c-qfpq5" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.039219 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cc483880-bf40-4f4c-bf77-52eb4896bd5b-serving-cert\") pod \"controller-manager-879f6c89f-75j6q\" (UID: \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" Nov 28 10:00:01 crc kubenswrapper[4838]: 
I1128 10:00:01.039234 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb0f7dc9-74c6-4031-8edb-7b10c219df34-config\") pod \"machine-api-operator-5694c8668f-bs85m\" (UID: \"fb0f7dc9-74c6-4031-8edb-7b10c219df34\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bs85m" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.039249 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-audit-policies\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.039268 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wsnx\" (UniqueName: \"kubernetes.io/projected/488c1f95-482d-4a08-b83d-81b3f08090ab-kube-api-access-9wsnx\") pod \"console-operator-58897d9998-vn4kw\" (UID: \"488c1f95-482d-4a08-b83d-81b3f08090ab\") " pod="openshift-console-operator/console-operator-58897d9998-vn4kw" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.039282 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/910afa09-7086-4695-bd2f-9397ad54ad4f-config\") pod \"etcd-operator-b45778765-hrtmd\" (UID: \"910afa09-7086-4695-bd2f-9397ad54ad4f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.039298 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4skq\" (UniqueName: \"kubernetes.io/projected/910afa09-7086-4695-bd2f-9397ad54ad4f-kube-api-access-n4skq\") pod \"etcd-operator-b45778765-hrtmd\" (UID: \"910afa09-7086-4695-bd2f-9397ad54ad4f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.039312 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/43982f1d-a55d-4870-be0a-c08c63a8e841-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-tkw4f\" (UID: \"43982f1d-a55d-4870-be0a-c08c63a8e841\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-tkw4f" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.039329 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.039371 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdccz\" (UniqueName: \"kubernetes.io/projected/4301958d-845f-449d-80a5-b81cd858368d-kube-api-access-cdccz\") pod \"ingress-operator-5b745b69d9-w7htp\" (UID: \"4301958d-845f-449d-80a5-b81cd858368d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w7htp" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.039798 4838 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc483880-bf40-4f4c-bf77-52eb4896bd5b-config\") pod \"controller-manager-879f6c89f-75j6q\" (UID: \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.039841 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/910afa09-7086-4695-bd2f-9397ad54ad4f-etcd-client\") pod \"etcd-operator-b45778765-hrtmd\" (UID: \"910afa09-7086-4695-bd2f-9397ad54ad4f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.039866 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h9hwt\" (UniqueName: \"kubernetes.io/projected/de1e3837-d0de-4dcf-9c04-621f91ab3f52-kube-api-access-h9hwt\") pod \"openshift-apiserver-operator-796bbdcf4f-x4zn6\" (UID: \"de1e3837-d0de-4dcf-9c04-621f91ab3f52\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4zn6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.039888 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a-serving-cert\") pod \"authentication-operator-69f744f599-25qvk\" (UID: \"4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-25qvk" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.039905 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a-service-ca-bundle\") pod \"authentication-operator-69f744f599-25qvk\" (UID: \"4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-25qvk" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.039933 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fdac71d5-8b14-4c59-9d37-345456b26b36-serving-cert\") pod \"openshift-config-operator-7777fb866f-z695f\" (UID: \"fdac71d5-8b14-4c59-9d37-345456b26b36\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-z695f" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.039949 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4301958d-845f-449d-80a5-b81cd858368d-metrics-tls\") pod \"ingress-operator-5b745b69d9-w7htp\" (UID: \"4301958d-845f-449d-80a5-b81cd858368d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w7htp" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.039975 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4301958d-845f-449d-80a5-b81cd858368d-trusted-ca\") pod \"ingress-operator-5b745b69d9-w7htp\" (UID: \"4301958d-845f-449d-80a5-b81cd858368d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w7htp" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.040090 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/6524e1a4-6ca0-4e18-8be8-f7cd56757453-auth-proxy-config\") pod \"machine-approver-56656f9798-465nq\" (UID: \"6524e1a4-6ca0-4e18-8be8-f7cd56757453\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-465nq" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.040189 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/69a82003-94b3-4aaa-9904-8485cfa5f662-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.040404 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x4rrn" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.042997 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/488c1f95-482d-4a08-b83d-81b3f08090ab-trusted-ca\") pod \"console-operator-58897d9998-vn4kw\" (UID: \"488c1f95-482d-4a08-b83d-81b3f08090ab\") " pod="openshift-console-operator/console-operator-58897d9998-vn4kw" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.043231 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.043233 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-zn7tf"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.044070 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-zn7tf" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.044138 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a-service-ca-bundle\") pod \"authentication-operator-69f744f599-25qvk\" (UID: \"4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-25qvk" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.044465 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/69a82003-94b3-4aaa-9904-8485cfa5f662-audit-dir\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.044499 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4301958d-845f-449d-80a5-b81cd858368d-bound-sa-token\") pod \"ingress-operator-5b745b69d9-w7htp\" (UID: \"4301958d-845f-449d-80a5-b81cd858368d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w7htp" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.044689 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8d6cc687-8b13-44b1-a15b-488c17e8b50c-audit-dir\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 
10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.044732 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9llj4\" (UniqueName: \"kubernetes.io/projected/8d6cc687-8b13-44b1-a15b-488c17e8b50c-kube-api-access-9llj4\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.044755 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/fdac71d5-8b14-4c59-9d37-345456b26b36-available-featuregates\") pod \"openshift-config-operator-7777fb866f-z695f\" (UID: \"fdac71d5-8b14-4c59-9d37-345456b26b36\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-z695f" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.046137 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-59mxb\" (UniqueName: \"kubernetes.io/projected/01a7faa9-fda8-4f56-b472-e9165b66dab9-kube-api-access-59mxb\") pod \"cluster-samples-operator-665b6dd947-wpfps\" (UID: \"01a7faa9-fda8-4f56-b472-e9165b66dab9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wpfps" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.046161 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74b81264-e855-4198-a063-9ef62eb9ad30-config\") pod \"route-controller-manager-6576b87f9c-xrgtq\" (UID: \"74b81264-e855-4198-a063-9ef62eb9ad30\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.046177 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/74b81264-e855-4198-a063-9ef62eb9ad30-client-ca\") pod \"route-controller-manager-6576b87f9c-xrgtq\" (UID: \"74b81264-e855-4198-a063-9ef62eb9ad30\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.046437 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/fdac71d5-8b14-4c59-9d37-345456b26b36-available-featuregates\") pod \"openshift-config-operator-7777fb866f-z695f\" (UID: \"fdac71d5-8b14-4c59-9d37-345456b26b36\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-z695f" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.046470 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/69a82003-94b3-4aaa-9904-8485cfa5f662-audit-dir\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.046549 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/488c1f95-482d-4a08-b83d-81b3f08090ab-config\") pod \"console-operator-58897d9998-vn4kw\" (UID: \"488c1f95-482d-4a08-b83d-81b3f08090ab\") " pod="openshift-console-operator/console-operator-58897d9998-vn4kw" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.046571 4838 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/de1e3837-d0de-4dcf-9c04-621f91ab3f52-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-x4zn6\" (UID: \"de1e3837-d0de-4dcf-9c04-621f91ab3f52\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4zn6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.046606 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-25qvk\" (UID: \"4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-25qvk" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.046623 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de1e3837-d0de-4dcf-9c04-621f91ab3f52-config\") pod \"openshift-apiserver-operator-796bbdcf4f-x4zn6\" (UID: \"de1e3837-d0de-4dcf-9c04-621f91ab3f52\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4zn6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.046641 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.046702 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cc483880-bf40-4f4c-bf77-52eb4896bd5b-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-75j6q\" (UID: \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.047409 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/488c1f95-482d-4a08-b83d-81b3f08090ab-config\") pod \"console-operator-58897d9998-vn4kw\" (UID: \"488c1f95-482d-4a08-b83d-81b3f08090ab\") " pod="openshift-console-operator/console-operator-58897d9998-vn4kw" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.049038 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fdac71d5-8b14-4c59-9d37-345456b26b36-serving-cert\") pod \"openshift-config-operator-7777fb866f-z695f\" (UID: \"fdac71d5-8b14-4c59-9d37-345456b26b36\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-z695f" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.049374 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a-serving-cert\") pod \"authentication-operator-69f744f599-25qvk\" (UID: \"4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-25qvk" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.049554 4838 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-apiserver"/"serving-cert" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.049694 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/69a82003-94b3-4aaa-9904-8485cfa5f662-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.049783 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/01a7faa9-fda8-4f56-b472-e9165b66dab9-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-wpfps\" (UID: \"01a7faa9-fda8-4f56-b472-e9165b66dab9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wpfps" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.049818 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfz6q\" (UniqueName: \"kubernetes.io/projected/74b81264-e855-4198-a063-9ef62eb9ad30-kube-api-access-pfz6q\") pod \"route-controller-manager-6576b87f9c-xrgtq\" (UID: \"74b81264-e855-4198-a063-9ef62eb9ad30\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.049862 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lk4sw\" (UniqueName: \"kubernetes.io/projected/4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a-kube-api-access-lk4sw\") pod \"authentication-operator-69f744f599-25qvk\" (UID: \"4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-25qvk" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.049888 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2w9dh\" (UniqueName: \"kubernetes.io/projected/fdac71d5-8b14-4c59-9d37-345456b26b36-kube-api-access-2w9dh\") pod \"openshift-config-operator-7777fb866f-z695f\" (UID: \"fdac71d5-8b14-4c59-9d37-345456b26b36\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-z695f" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.049910 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rtp97\" (UniqueName: \"kubernetes.io/projected/6524e1a4-6ca0-4e18-8be8-f7cd56757453-kube-api-access-rtp97\") pod \"machine-approver-56656f9798-465nq\" (UID: \"6524e1a4-6ca0-4e18-8be8-f7cd56757453\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-465nq" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.049979 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8c869fc5-3a3b-41e2-8eac-4dc5835be740-metrics-tls\") pod \"dns-operator-744455d44c-qfpq5\" (UID: \"8c869fc5-3a3b-41e2-8eac-4dc5835be740\") " pod="openshift-dns-operator/dns-operator-744455d44c-qfpq5" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.050008 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6524e1a4-6ca0-4e18-8be8-f7cd56757453-config\") pod \"machine-approver-56656f9798-465nq\" (UID: \"6524e1a4-6ca0-4e18-8be8-f7cd56757453\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-465nq" Nov 28 10:00:01 crc 
kubenswrapper[4838]: I1128 10:00:01.050030 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/69a82003-94b3-4aaa-9904-8485cfa5f662-audit-policies\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.050055 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/910afa09-7086-4695-bd2f-9397ad54ad4f-etcd-ca\") pod \"etcd-operator-b45778765-hrtmd\" (UID: \"910afa09-7086-4695-bd2f-9397ad54ad4f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.050276 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-25qvk\" (UID: \"4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-25qvk" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.050538 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cc483880-bf40-4f4c-bf77-52eb4896bd5b-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-75j6q\" (UID: \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.050831 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6524e1a4-6ca0-4e18-8be8-f7cd56757453-config\") pod \"machine-approver-56656f9798-465nq\" (UID: \"6524e1a4-6ca0-4e18-8be8-f7cd56757453\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-465nq" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.050881 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/43982f1d-a55d-4870-be0a-c08c63a8e841-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-tkw4f\" (UID: \"43982f1d-a55d-4870-be0a-c08c63a8e841\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-tkw4f" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.050908 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bs9hd\" (UniqueName: \"kubernetes.io/projected/43982f1d-a55d-4870-be0a-c08c63a8e841-kube-api-access-bs9hd\") pod \"cluster-image-registry-operator-dc59b4c8b-tkw4f\" (UID: \"43982f1d-a55d-4870-be0a-c08c63a8e841\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-tkw4f" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.050947 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hmmr\" (UniqueName: \"kubernetes.io/projected/69a82003-94b3-4aaa-9904-8485cfa5f662-kube-api-access-7hmmr\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.051089 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a-config\") pod \"authentication-operator-69f744f599-25qvk\" (UID: \"4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-25qvk" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.051121 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cc483880-bf40-4f4c-bf77-52eb4896bd5b-client-ca\") pod \"controller-manager-879f6c89f-75j6q\" (UID: \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.051140 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.053912 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a-config\") pod \"authentication-operator-69f744f599-25qvk\" (UID: \"4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-25qvk" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.053917 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc483880-bf40-4f4c-bf77-52eb4896bd5b-config\") pod \"controller-manager-879f6c89f-75j6q\" (UID: \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.054223 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/69a82003-94b3-4aaa-9904-8485cfa5f662-encryption-config\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.054309 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/69a82003-94b3-4aaa-9904-8485cfa5f662-serving-cert\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.054783 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/488c1f95-482d-4a08-b83d-81b3f08090ab-serving-cert\") pod \"console-operator-58897d9998-vn4kw\" (UID: \"488c1f95-482d-4a08-b83d-81b3f08090ab\") " pod="openshift-console-operator/console-operator-58897d9998-vn4kw" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.055012 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cc483880-bf40-4f4c-bf77-52eb4896bd5b-serving-cert\") pod \"controller-manager-879f6c89f-75j6q\" (UID: \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" Nov 
28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.055671 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/69a82003-94b3-4aaa-9904-8485cfa5f662-audit-policies\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.056119 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cc483880-bf40-4f4c-bf77-52eb4896bd5b-client-ca\") pod \"controller-manager-879f6c89f-75j6q\" (UID: \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.056814 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.057199 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/6524e1a4-6ca0-4e18-8be8-f7cd56757453-machine-approver-tls\") pod \"machine-approver-56656f9798-465nq\" (UID: \"6524e1a4-6ca0-4e18-8be8-f7cd56757453\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-465nq" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.058767 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/69a82003-94b3-4aaa-9904-8485cfa5f662-etcd-client\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.060697 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vrzwk"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.061229 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ss9lp"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.061307 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vrzwk" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.061491 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-pnngr"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.061623 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8c869fc5-3a3b-41e2-8eac-4dc5835be740-metrics-tls\") pod \"dns-operator-744455d44c-qfpq5\" (UID: \"8c869fc5-3a3b-41e2-8eac-4dc5835be740\") " pod="openshift-dns-operator/dns-operator-744455d44c-qfpq5" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.061756 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ss9lp" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.061941 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pnngr" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.062820 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-kmvqb"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.063562 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-kmvqb" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.065930 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-zmc4g"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.066855 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-zmc4g" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.066903 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lng7h"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.067317 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lng7h" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.071135 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-xsjrl"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.071576 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-k2xhp"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.075161 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-25qvk"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.075268 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-k2xhp" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.075515 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.078461 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-vn4kw"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.082792 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.089433 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-qfpq5"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.091009 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-qlvp5"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.095228 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.102511 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.103098 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-f4z4k"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.103546 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qlvp5" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.103622 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4zn6"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.103640 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-hrtmd"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.103737 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-f4z4k" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.103994 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.110755 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-tkw4f"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.112658 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.113738 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.114260 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-x4rrn"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.115446 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lng7h"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.116920 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-df5f6"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.119330 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-2w9k9"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.119353 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-bs85m"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.120843 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nm4jv"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.121930 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-8b7z5"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.123345 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.126250 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-pnngr"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.127483 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lsj7g"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.128981 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-lvtzk"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.130736 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9mrm4"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.132195 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vrzwk"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.134084 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ss9lp"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.135454 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-5xzrq"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.136663 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-zn7tf"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.138126 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-w7htp"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.138989 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.139769 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-xsjrl"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.141080 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.142359 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-kmvqb"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.143959 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-bs82t"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.145438 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7vsn9"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.146750 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fh5z9"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.147973 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-f4z4k"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.150261 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-zj778"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.151692 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-k2xhp"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.151740 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-z695f"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.151841 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-zj778" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.151863 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/910afa09-7086-4695-bd2f-9397ad54ad4f-etcd-service-ca\") pod \"etcd-operator-b45778765-hrtmd\" (UID: \"910afa09-7086-4695-bd2f-9397ad54ad4f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.151915 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.152525 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/910afa09-7086-4695-bd2f-9397ad54ad4f-etcd-service-ca\") pod \"etcd-operator-b45778765-hrtmd\" (UID: \"910afa09-7086-4695-bd2f-9397ad54ad4f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.152941 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.151944 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.153032 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.153057 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.153450 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc 
kubenswrapper[4838]: I1128 10:00:01.153506 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-88g5b\" (UniqueName: \"kubernetes.io/projected/fb0f7dc9-74c6-4031-8edb-7b10c219df34-kube-api-access-88g5b\") pod \"machine-api-operator-5694c8668f-bs85m\" (UID: \"fb0f7dc9-74c6-4031-8edb-7b10c219df34\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bs85m" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.153594 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.153628 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/910afa09-7086-4695-bd2f-9397ad54ad4f-serving-cert\") pod \"etcd-operator-b45778765-hrtmd\" (UID: \"910afa09-7086-4695-bd2f-9397ad54ad4f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.153658 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.153659 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/fb0f7dc9-74c6-4031-8edb-7b10c219df34-images\") pod \"machine-api-operator-5694c8668f-bs85m\" (UID: \"fb0f7dc9-74c6-4031-8edb-7b10c219df34\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bs85m" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.153846 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.153965 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb0f7dc9-74c6-4031-8edb-7b10c219df34-config\") pod \"machine-api-operator-5694c8668f-bs85m\" (UID: \"fb0f7dc9-74c6-4031-8edb-7b10c219df34\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bs85m" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.153999 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-audit-policies\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154035 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/910afa09-7086-4695-bd2f-9397ad54ad4f-config\") pod \"etcd-operator-b45778765-hrtmd\" (UID: \"910afa09-7086-4695-bd2f-9397ad54ad4f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 
10:00:01.154059 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4skq\" (UniqueName: \"kubernetes.io/projected/910afa09-7086-4695-bd2f-9397ad54ad4f-kube-api-access-n4skq\") pod \"etcd-operator-b45778765-hrtmd\" (UID: \"910afa09-7086-4695-bd2f-9397ad54ad4f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154111 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/43982f1d-a55d-4870-be0a-c08c63a8e841-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-tkw4f\" (UID: \"43982f1d-a55d-4870-be0a-c08c63a8e841\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-tkw4f" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154180 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154205 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdccz\" (UniqueName: \"kubernetes.io/projected/4301958d-845f-449d-80a5-b81cd858368d-kube-api-access-cdccz\") pod \"ingress-operator-5b745b69d9-w7htp\" (UID: \"4301958d-845f-449d-80a5-b81cd858368d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w7htp" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154231 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/910afa09-7086-4695-bd2f-9397ad54ad4f-etcd-client\") pod \"etcd-operator-b45778765-hrtmd\" (UID: \"910afa09-7086-4695-bd2f-9397ad54ad4f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154255 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h9hwt\" (UniqueName: \"kubernetes.io/projected/de1e3837-d0de-4dcf-9c04-621f91ab3f52-kube-api-access-h9hwt\") pod \"openshift-apiserver-operator-796bbdcf4f-x4zn6\" (UID: \"de1e3837-d0de-4dcf-9c04-621f91ab3f52\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4zn6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154294 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4301958d-845f-449d-80a5-b81cd858368d-metrics-tls\") pod \"ingress-operator-5b745b69d9-w7htp\" (UID: \"4301958d-845f-449d-80a5-b81cd858368d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w7htp" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154332 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4301958d-845f-449d-80a5-b81cd858368d-trusted-ca\") pod \"ingress-operator-5b745b69d9-w7htp\" (UID: \"4301958d-845f-449d-80a5-b81cd858368d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w7htp" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154359 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/4301958d-845f-449d-80a5-b81cd858368d-bound-sa-token\") pod \"ingress-operator-5b745b69d9-w7htp\" (UID: \"4301958d-845f-449d-80a5-b81cd858368d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w7htp" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154382 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8d6cc687-8b13-44b1-a15b-488c17e8b50c-audit-dir\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154424 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/fb0f7dc9-74c6-4031-8edb-7b10c219df34-images\") pod \"machine-api-operator-5694c8668f-bs85m\" (UID: \"fb0f7dc9-74c6-4031-8edb-7b10c219df34\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bs85m" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154438 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9llj4\" (UniqueName: \"kubernetes.io/projected/8d6cc687-8b13-44b1-a15b-488c17e8b50c-kube-api-access-9llj4\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154540 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-59mxb\" (UniqueName: \"kubernetes.io/projected/01a7faa9-fda8-4f56-b472-e9165b66dab9-kube-api-access-59mxb\") pod \"cluster-samples-operator-665b6dd947-wpfps\" (UID: \"01a7faa9-fda8-4f56-b472-e9165b66dab9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wpfps" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154569 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74b81264-e855-4198-a063-9ef62eb9ad30-config\") pod \"route-controller-manager-6576b87f9c-xrgtq\" (UID: \"74b81264-e855-4198-a063-9ef62eb9ad30\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154592 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/74b81264-e855-4198-a063-9ef62eb9ad30-client-ca\") pod \"route-controller-manager-6576b87f9c-xrgtq\" (UID: \"74b81264-e855-4198-a063-9ef62eb9ad30\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154625 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/de1e3837-d0de-4dcf-9c04-621f91ab3f52-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-x4zn6\" (UID: \"de1e3837-d0de-4dcf-9c04-621f91ab3f52\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4zn6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154653 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de1e3837-d0de-4dcf-9c04-621f91ab3f52-config\") pod \"openshift-apiserver-operator-796bbdcf4f-x4zn6\" (UID: \"de1e3837-d0de-4dcf-9c04-621f91ab3f52\") " 
pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4zn6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154732 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154780 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/01a7faa9-fda8-4f56-b472-e9165b66dab9-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-wpfps\" (UID: \"01a7faa9-fda8-4f56-b472-e9165b66dab9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wpfps" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154804 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfz6q\" (UniqueName: \"kubernetes.io/projected/74b81264-e855-4198-a063-9ef62eb9ad30-kube-api-access-pfz6q\") pod \"route-controller-manager-6576b87f9c-xrgtq\" (UID: \"74b81264-e855-4198-a063-9ef62eb9ad30\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154854 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/910afa09-7086-4695-bd2f-9397ad54ad4f-etcd-ca\") pod \"etcd-operator-b45778765-hrtmd\" (UID: \"910afa09-7086-4695-bd2f-9397ad54ad4f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154881 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/43982f1d-a55d-4870-be0a-c08c63a8e841-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-tkw4f\" (UID: \"43982f1d-a55d-4870-be0a-c08c63a8e841\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-tkw4f" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154903 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bs9hd\" (UniqueName: \"kubernetes.io/projected/43982f1d-a55d-4870-be0a-c08c63a8e841-kube-api-access-bs9hd\") pod \"cluster-image-registry-operator-dc59b4c8b-tkw4f\" (UID: \"43982f1d-a55d-4870-be0a-c08c63a8e841\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-tkw4f" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154938 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.154977 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/fb0f7dc9-74c6-4031-8edb-7b10c219df34-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-bs85m\" (UID: \"fb0f7dc9-74c6-4031-8edb-7b10c219df34\") " 
pod="openshift-machine-api/machine-api-operator-5694c8668f-bs85m" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.155031 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/74b81264-e855-4198-a063-9ef62eb9ad30-serving-cert\") pod \"route-controller-manager-6576b87f9c-xrgtq\" (UID: \"74b81264-e855-4198-a063-9ef62eb9ad30\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.155058 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/43982f1d-a55d-4870-be0a-c08c63a8e841-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-tkw4f\" (UID: \"43982f1d-a55d-4870-be0a-c08c63a8e841\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-tkw4f" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.155088 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.155423 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/910afa09-7086-4695-bd2f-9397ad54ad4f-config\") pod \"etcd-operator-b45778765-hrtmd\" (UID: \"910afa09-7086-4695-bd2f-9397ad54ad4f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.155431 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8d6cc687-8b13-44b1-a15b-488c17e8b50c-audit-dir\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.155459 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.155675 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.156053 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb0f7dc9-74c6-4031-8edb-7b10c219df34-config\") pod \"machine-api-operator-5694c8668f-bs85m\" (UID: \"fb0f7dc9-74c6-4031-8edb-7b10c219df34\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bs85m" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.156181 4838 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.156219 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/74b81264-e855-4198-a063-9ef62eb9ad30-client-ca\") pod \"route-controller-manager-6576b87f9c-xrgtq\" (UID: \"74b81264-e855-4198-a063-9ef62eb9ad30\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.156271 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/910afa09-7086-4695-bd2f-9397ad54ad4f-etcd-ca\") pod \"etcd-operator-b45778765-hrtmd\" (UID: \"910afa09-7086-4695-bd2f-9397ad54ad4f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.156153 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de1e3837-d0de-4dcf-9c04-621f91ab3f52-config\") pod \"openshift-apiserver-operator-796bbdcf4f-x4zn6\" (UID: \"de1e3837-d0de-4dcf-9c04-621f91ab3f52\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4zn6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.156592 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-9pfms"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.157133 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/910afa09-7086-4695-bd2f-9397ad54ad4f-serving-cert\") pod \"etcd-operator-b45778765-hrtmd\" (UID: \"910afa09-7086-4695-bd2f-9397ad54ad4f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.157292 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/43982f1d-a55d-4870-be0a-c08c63a8e841-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-tkw4f\" (UID: \"43982f1d-a55d-4870-be0a-c08c63a8e841\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-tkw4f" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.157345 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74b81264-e855-4198-a063-9ef62eb9ad30-config\") pod \"route-controller-manager-6576b87f9c-xrgtq\" (UID: \"74b81264-e855-4198-a063-9ef62eb9ad30\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.157436 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-9pfms" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.157490 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.158664 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/43982f1d-a55d-4870-be0a-c08c63a8e841-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-tkw4f\" (UID: \"43982f1d-a55d-4870-be0a-c08c63a8e841\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-tkw4f" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.158709 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/fb0f7dc9-74c6-4031-8edb-7b10c219df34-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-bs85m\" (UID: \"fb0f7dc9-74c6-4031-8edb-7b10c219df34\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bs85m" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.159022 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-qlvp5"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.159035 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/910afa09-7086-4695-bd2f-9397ad54ad4f-etcd-client\") pod \"etcd-operator-b45778765-hrtmd\" (UID: \"910afa09-7086-4695-bd2f-9397ad54ad4f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.159935 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.159981 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.160134 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-audit-policies\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.160666 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-df5f6\" 
(UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.160947 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/74b81264-e855-4198-a063-9ef62eb9ad30-serving-cert\") pod \"route-controller-manager-6576b87f9c-xrgtq\" (UID: \"74b81264-e855-4198-a063-9ef62eb9ad30\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.161027 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-zj778"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.160977 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.162378 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-9pfms"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.162381 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.162779 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/de1e3837-d0de-4dcf-9c04-621f91ab3f52-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-x4zn6\" (UID: \"de1e3837-d0de-4dcf-9c04-621f91ab3f52\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4zn6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.164155 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.164782 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wpfps"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.166409 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-8q49c"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.166926 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-8q49c" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.174728 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.193934 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.214117 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.234816 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.240408 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/01a7faa9-fda8-4f56-b472-e9165b66dab9-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-wpfps\" (UID: \"01a7faa9-fda8-4f56-b472-e9165b66dab9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wpfps" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.256120 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.275772 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.324293 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.326932 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4301958d-845f-449d-80a5-b81cd858368d-trusted-ca\") pod \"ingress-operator-5b745b69d9-w7htp\" (UID: \"4301958d-845f-449d-80a5-b81cd858368d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w7htp" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.334732 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.339179 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4301958d-845f-449d-80a5-b81cd858368d-metrics-tls\") pod \"ingress-operator-5b745b69d9-w7htp\" (UID: \"4301958d-845f-449d-80a5-b81cd858368d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w7htp" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.353986 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.375829 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.394540 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.415607 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 28 
10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.434494 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.454872 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.475230 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.502676 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.514593 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.535474 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.561680 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:01 crc kubenswrapper[4838]: E1128 10:00:01.561970 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:02:03.561918102 +0000 UTC m=+295.260892322 (durationBeforeRetry 2m2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.562210 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.562381 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.562450 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.562511 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.563575 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.566594 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.566998 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.570804 4838 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.575012 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.594774 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.614759 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.635011 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.655021 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.675026 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.686437 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.695130 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.695569 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.704587 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.716051 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.736852 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.755396 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.774789 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.795879 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.815887 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.861596 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.861805 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.867780 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.868481 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.875572 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp"] Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.879121 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.896308 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.932909 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzrf6\" (UniqueName: \"kubernetes.io/projected/cc483880-bf40-4f4c-bf77-52eb4896bd5b-kube-api-access-fzrf6\") pod \"controller-manager-879f6c89f-75j6q\" (UID: \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\") " pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.951893 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjsxx\" (UniqueName: \"kubernetes.io/projected/ebabbe26-3c09-4a23-8dcc-aed864a3e4a4-kube-api-access-zjsxx\") pod \"downloads-7954f5f757-2w9k9\" (UID: \"ebabbe26-3c09-4a23-8dcc-aed864a3e4a4\") " pod="openshift-console/downloads-7954f5f757-2w9k9" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.968767 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvl79\" (UniqueName: \"kubernetes.io/projected/8c869fc5-3a3b-41e2-8eac-4dc5835be740-kube-api-access-nvl79\") pod \"dns-operator-744455d44c-qfpq5\" (UID: \"8c869fc5-3a3b-41e2-8eac-4dc5835be740\") " pod="openshift-dns-operator/dns-operator-744455d44c-qfpq5" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.975001 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.984320 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-2w9k9" Nov 28 10:00:01 crc kubenswrapper[4838]: I1128 10:00:01.995674 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 28 10:00:02 crc kubenswrapper[4838]: W1128 10:00:02.004787 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-48a6d04c44e212d03d72c0e977776ff48416e756e473211ec58641039da74a0a WatchSource:0}: Error finding container 48a6d04c44e212d03d72c0e977776ff48416e756e473211ec58641039da74a0a: Status 404 returned error can't find the container with id 48a6d04c44e212d03d72c0e977776ff48416e756e473211ec58641039da74a0a Nov 28 10:00:02 crc kubenswrapper[4838]: W1128 10:00:02.005401 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-67d4de33cf52f6c0d2d75242c1d48c2c9e0db906747500f317af480d6c670640 WatchSource:0}: Error finding container 67d4de33cf52f6c0d2d75242c1d48c2c9e0db906747500f317af480d6c670640: Status 404 returned error can't find the container with id 67d4de33cf52f6c0d2d75242c1d48c2c9e0db906747500f317af480d6c670640 Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.014204 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.053074 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wsnx\" (UniqueName: \"kubernetes.io/projected/488c1f95-482d-4a08-b83d-81b3f08090ab-kube-api-access-9wsnx\") pod \"console-operator-58897d9998-vn4kw\" (UID: \"488c1f95-482d-4a08-b83d-81b3f08090ab\") " pod="openshift-console-operator/console-operator-58897d9998-vn4kw" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.053164 4838 request.go:700] Waited for 1.002968276s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-cluster-machine-approver/serviceaccounts/machine-approver-sa/token Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.072566 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rtp97\" (UniqueName: \"kubernetes.io/projected/6524e1a4-6ca0-4e18-8be8-f7cd56757453-kube-api-access-rtp97\") pod \"machine-approver-56656f9798-465nq\" (UID: \"6524e1a4-6ca0-4e18-8be8-f7cd56757453\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-465nq" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.093411 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lk4sw\" (UniqueName: \"kubernetes.io/projected/4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a-kube-api-access-lk4sw\") pod \"authentication-operator-69f744f599-25qvk\" (UID: \"4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-25qvk" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.093595 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.107162 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-25qvk" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.112161 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2w9dh\" (UniqueName: \"kubernetes.io/projected/fdac71d5-8b14-4c59-9d37-345456b26b36-kube-api-access-2w9dh\") pod \"openshift-config-operator-7777fb866f-z695f\" (UID: \"fdac71d5-8b14-4c59-9d37-345456b26b36\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-z695f" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.132463 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hmmr\" (UniqueName: \"kubernetes.io/projected/69a82003-94b3-4aaa-9904-8485cfa5f662-kube-api-access-7hmmr\") pod \"apiserver-7bbb656c7d-jvhxj\" (UID: \"69a82003-94b3-4aaa-9904-8485cfa5f662\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.134480 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.138358 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-vn4kw" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.154456 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.155955 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-z695f" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.163229 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-2w9k9"] Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.174820 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.194242 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.214500 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.229551 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-465nq" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.234228 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.238212 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-qfpq5" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.254569 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.274779 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.295280 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.295398 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.315347 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.335328 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.354864 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.374944 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.394859 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.415268 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.436003 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.456512 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.475813 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 28 10:00:02 crc kubenswrapper[4838]: W1128 10:00:02.482369 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podebabbe26_3c09_4a23_8dcc_aed864a3e4a4.slice/crio-04e4db2f690c6560727d9c895c7f052533afbef720b5918c4332c8a090690f40 WatchSource:0}: Error finding container 04e4db2f690c6560727d9c895c7f052533afbef720b5918c4332c8a090690f40: Status 404 returned error can't find the container with id 04e4db2f690c6560727d9c895c7f052533afbef720b5918c4332c8a090690f40 Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.494978 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.514359 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.535038 4838 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 28 10:00:02 crc kubenswrapper[4838]: W1128 10:00:02.546268 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6524e1a4_6ca0_4e18_8be8_f7cd56757453.slice/crio-50a28ae1bc5d6b73fd0ef07001a0aa038205f5d40131410a02d275a9cf7c1e5c WatchSource:0}: Error finding container 50a28ae1bc5d6b73fd0ef07001a0aa038205f5d40131410a02d275a9cf7c1e5c: Status 404 returned error can't find the container with id 50a28ae1bc5d6b73fd0ef07001a0aa038205f5d40131410a02d275a9cf7c1e5c Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.554447 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.584094 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.594658 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.614186 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.616084 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-2w9k9" event={"ID":"ebabbe26-3c09-4a23-8dcc-aed864a3e4a4","Type":"ContainerStarted","Data":"04e4db2f690c6560727d9c895c7f052533afbef720b5918c4332c8a090690f40"} Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.618727 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"42fc97ccfeec9d99766c853201135447a5e6a4eb424542a1955e1c6941f49d56"} Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.622616 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"48a6d04c44e212d03d72c0e977776ff48416e756e473211ec58641039da74a0a"} Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.624011 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"67d4de33cf52f6c0d2d75242c1d48c2c9e0db906747500f317af480d6c670640"} Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.630400 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-465nq" event={"ID":"6524e1a4-6ca0-4e18-8be8-f7cd56757453","Type":"ContainerStarted","Data":"50a28ae1bc5d6b73fd0ef07001a0aa038205f5d40131410a02d275a9cf7c1e5c"} Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.634573 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.653797 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.688608 4838 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.697794 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.716057 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.747318 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.754669 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.774224 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.794680 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.814103 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.834018 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.853961 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.873936 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.893580 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.914530 4838 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.934952 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 28 10:00:02 crc kubenswrapper[4838]: I1128 10:00:02.954920 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.002155 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-88g5b\" (UniqueName: \"kubernetes.io/projected/fb0f7dc9-74c6-4031-8edb-7b10c219df34-kube-api-access-88g5b\") pod \"machine-api-operator-5694c8668f-bs85m\" (UID: \"fb0f7dc9-74c6-4031-8edb-7b10c219df34\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bs85m" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.023484 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4301958d-845f-449d-80a5-b81cd858368d-bound-sa-token\") pod \"ingress-operator-5b745b69d9-w7htp\" (UID: \"4301958d-845f-449d-80a5-b81cd858368d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w7htp" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 
10:00:03.050125 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h9hwt\" (UniqueName: \"kubernetes.io/projected/de1e3837-d0de-4dcf-9c04-621f91ab3f52-kube-api-access-h9hwt\") pod \"openshift-apiserver-operator-796bbdcf4f-x4zn6\" (UID: \"de1e3837-d0de-4dcf-9c04-621f91ab3f52\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4zn6" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.051632 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-qfpq5"] Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.053896 4838 request.go:700] Waited for 1.898424005s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-etcd-operator/serviceaccounts/etcd-operator/token Nov 28 10:00:03 crc kubenswrapper[4838]: W1128 10:00:03.056482 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8c869fc5_3a3b_41e2_8eac_4dc5835be740.slice/crio-11efd716bbfcdf467cbf49a1615cfa817bc438060af897efb9d32be0353d3781 WatchSource:0}: Error finding container 11efd716bbfcdf467cbf49a1615cfa817bc438060af897efb9d32be0353d3781: Status 404 returned error can't find the container with id 11efd716bbfcdf467cbf49a1615cfa817bc438060af897efb9d32be0353d3781 Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.064700 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj"] Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.067936 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/43982f1d-a55d-4870-be0a-c08c63a8e841-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-tkw4f\" (UID: \"43982f1d-a55d-4870-be0a-c08c63a8e841\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-tkw4f" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.071167 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-vn4kw"] Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.074012 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-z695f"] Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.079515 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-25qvk"] Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.079565 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-75j6q"] Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.081996 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4skq\" (UniqueName: \"kubernetes.io/projected/910afa09-7086-4695-bd2f-9397ad54ad4f-kube-api-access-n4skq\") pod \"etcd-operator-b45778765-hrtmd\" (UID: \"910afa09-7086-4695-bd2f-9397ad54ad4f\") " pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" Nov 28 10:00:03 crc kubenswrapper[4838]: W1128 10:00:03.097871 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod488c1f95_482d_4a08_b83d_81b3f08090ab.slice/crio-e933d3490cc2f3470e780fdd1a8e3521f5a10fd3de62e65b79e3bf3d4a6d6083 WatchSource:0}: Error finding container 
e933d3490cc2f3470e780fdd1a8e3521f5a10fd3de62e65b79e3bf3d4a6d6083: Status 404 returned error can't find the container with id e933d3490cc2f3470e780fdd1a8e3521f5a10fd3de62e65b79e3bf3d4a6d6083 Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.104336 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9llj4\" (UniqueName: \"kubernetes.io/projected/8d6cc687-8b13-44b1-a15b-488c17e8b50c-kube-api-access-9llj4\") pod \"oauth-openshift-558db77b4-df5f6\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.118518 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfz6q\" (UniqueName: \"kubernetes.io/projected/74b81264-e855-4198-a063-9ef62eb9ad30-kube-api-access-pfz6q\") pod \"route-controller-manager-6576b87f9c-xrgtq\" (UID: \"74b81264-e855-4198-a063-9ef62eb9ad30\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.140611 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-59mxb\" (UniqueName: \"kubernetes.io/projected/01a7faa9-fda8-4f56-b472-e9165b66dab9-kube-api-access-59mxb\") pod \"cluster-samples-operator-665b6dd947-wpfps\" (UID: \"01a7faa9-fda8-4f56-b472-e9165b66dab9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wpfps" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.151506 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdccz\" (UniqueName: \"kubernetes.io/projected/4301958d-845f-449d-80a5-b81cd858368d-kube-api-access-cdccz\") pod \"ingress-operator-5b745b69d9-w7htp\" (UID: \"4301958d-845f-449d-80a5-b81cd858368d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w7htp" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.173272 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bs9hd\" (UniqueName: \"kubernetes.io/projected/43982f1d-a55d-4870-be0a-c08c63a8e841-kube-api-access-bs9hd\") pod \"cluster-image-registry-operator-dc59b4c8b-tkw4f\" (UID: \"43982f1d-a55d-4870-be0a-c08c63a8e841\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-tkw4f" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.174581 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.195678 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.207184 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4zn6" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.214467 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.215752 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.221492 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.235291 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.236505 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-bs85m" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.248279 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-tkw4f" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.255489 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.271996 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.274474 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.286824 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wpfps" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.292619 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w7htp" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.335171 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.354519 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.429857 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4zn6"] Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.565553 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6a71181f-49a4-4b69-a3e6-2413929b81dc-ca-trust-extracted\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.565607 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hqg6v\" (UniqueName: \"kubernetes.io/projected/6a71181f-49a4-4b69-a3e6-2413929b81dc-kube-api-access-hqg6v\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.565657 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-console-oauth-config\") pod \"console-f9d7485db-lvtzk\" 
(UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " pod="openshift-console/console-f9d7485db-lvtzk" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.565767 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdpxt\" (UniqueName: \"kubernetes.io/projected/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-kube-api-access-tdpxt\") pod \"console-f9d7485db-lvtzk\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " pod="openshift-console/console-f9d7485db-lvtzk" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.565850 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vb6p\" (UniqueName: \"kubernetes.io/projected/9f72bc48-a623-4365-8dfb-bbf5b0179798-kube-api-access-8vb6p\") pod \"openshift-controller-manager-operator-756b6f6bc6-nm4jv\" (UID: \"9f72bc48-a623-4365-8dfb-bbf5b0179798\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nm4jv" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.565882 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-service-ca\") pod \"console-f9d7485db-lvtzk\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " pod="openshift-console/console-f9d7485db-lvtzk" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.565906 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d97b2299-c955-4314-8b0a-a952e7bb53da-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lsj7g\" (UID: \"d97b2299-c955-4314-8b0a-a952e7bb53da\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lsj7g" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.565929 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-serving-cert\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.565950 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d97b2299-c955-4314-8b0a-a952e7bb53da-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lsj7g\" (UID: \"d97b2299-c955-4314-8b0a-a952e7bb53da\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lsj7g" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.565972 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-node-pullsecrets\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.565993 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5h4t8\" (UniqueName: \"kubernetes.io/projected/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-kube-api-access-5h4t8\") pod \"apiserver-76f77b778f-bs82t\" (UID: 
\"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.566019 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-console-config\") pod \"console-f9d7485db-lvtzk\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " pod="openshift-console/console-f9d7485db-lvtzk" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.566061 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6a71181f-49a4-4b69-a3e6-2413929b81dc-installation-pull-secrets\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.566084 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6a71181f-49a4-4b69-a3e6-2413929b81dc-bound-sa-token\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.566104 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-config\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.566127 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-image-import-ca\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.566145 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-trusted-ca-bundle\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.566169 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d97b2299-c955-4314-8b0a-a952e7bb53da-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lsj7g\" (UID: \"d97b2299-c955-4314-8b0a-a952e7bb53da\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lsj7g" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.566191 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-console-serving-cert\") pod \"console-f9d7485db-lvtzk\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " pod="openshift-console/console-f9d7485db-lvtzk" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.566218 
4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6a71181f-49a4-4b69-a3e6-2413929b81dc-trusted-ca\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.566285 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-oauth-serving-cert\") pod \"console-f9d7485db-lvtzk\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " pod="openshift-console/console-f9d7485db-lvtzk" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.566314 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-audit\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.566334 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-encryption-config\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.566356 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6a71181f-49a4-4b69-a3e6-2413929b81dc-registry-certificates\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.566376 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-trusted-ca-bundle\") pod \"console-f9d7485db-lvtzk\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " pod="openshift-console/console-f9d7485db-lvtzk" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.566411 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-etcd-client\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.566434 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9f72bc48-a623-4365-8dfb-bbf5b0179798-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-nm4jv\" (UID: \"9f72bc48-a623-4365-8dfb-bbf5b0179798\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nm4jv" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.566467 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.566523 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-etcd-serving-ca\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.566627 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-audit-dir\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.566663 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6a71181f-49a4-4b69-a3e6-2413929b81dc-registry-tls\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.566708 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f72bc48-a623-4365-8dfb-bbf5b0179798-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-nm4jv\" (UID: \"9f72bc48-a623-4365-8dfb-bbf5b0179798\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nm4jv" Nov 28 10:00:03 crc kubenswrapper[4838]: E1128 10:00:03.566859 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:04.066844113 +0000 UTC m=+175.765818293 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.637198 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-z695f" event={"ID":"fdac71d5-8b14-4c59-9d37-345456b26b36","Type":"ContainerStarted","Data":"da2b8bccf72a5c489832f576bedea88feaf00dfdf90bbaf0abcb45468a6dbb99"}
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.637243 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-z695f" event={"ID":"fdac71d5-8b14-4c59-9d37-345456b26b36","Type":"ContainerStarted","Data":"3edc60e6de5fd63f030930a0bd9804d4627dad2420a0510ff0b583c08e0a476c"}
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.638922 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" event={"ID":"cc483880-bf40-4f4c-bf77-52eb4896bd5b","Type":"ContainerStarted","Data":"91baa3b5b59a6db9b0bc9d99bcd3896eef74bdd31fb32b9d35840a3de0a3973f"}
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.642532 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-2w9k9" event={"ID":"ebabbe26-3c09-4a23-8dcc-aed864a3e4a4","Type":"ContainerStarted","Data":"46f3dbb2f9c6cdfa77df7c36da72df34d204ccdb93d761c1641407c07a2a7024"}
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.643363 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-2w9k9"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.646666 4838 patch_prober.go:28] interesting pod/downloads-7954f5f757-2w9k9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused" start-of-body=
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.646731 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2w9k9" podUID="ebabbe26-3c09-4a23-8dcc-aed864a3e4a4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.648052 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4zn6" event={"ID":"de1e3837-d0de-4dcf-9c04-621f91ab3f52","Type":"ContainerStarted","Data":"97020c5a2dbeddfc34d57d80f41db76d95b86f61200f0d44a4f06742f5f3c2ff"}
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.649944 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-vn4kw" event={"ID":"488c1f95-482d-4a08-b83d-81b3f08090ab","Type":"ContainerStarted","Data":"e933d3490cc2f3470e780fdd1a8e3521f5a10fd3de62e65b79e3bf3d4a6d6083"}
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.652291 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-465nq" event={"ID":"6524e1a4-6ca0-4e18-8be8-f7cd56757453","Type":"ContainerStarted","Data":"bcbfc3a51acd03b5524f717bc16e36c2cbd220c3c6af154dd4bf1a5c54611600"}
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.653589 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" event={"ID":"69a82003-94b3-4aaa-9904-8485cfa5f662","Type":"ContainerStarted","Data":"b92ab121d64532abe4f12e8b07f2be6b28b464dc41896cce9f711e00af1ccacd"}
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.654697 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"a88697d42855ef451d08da0e1fd72254ea87b723cfbf036ac9e8480f917b647d"}
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.657911 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-25qvk" event={"ID":"4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a","Type":"ContainerStarted","Data":"765b739d7a7328d59e2ace772e596eaff1473042342c4a7ffa6de3bb268e138d"}
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.660338 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"9a567584f52f9d72fc7ed6602d395a1519948837929e0f7e26f38bf0ce7e4e56"}
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.660406 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.661192 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-qfpq5" event={"ID":"8c869fc5-3a3b-41e2-8eac-4dc5835be740","Type":"ContainerStarted","Data":"11efd716bbfcdf467cbf49a1615cfa817bc438060af897efb9d32be0353d3781"}
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.662150 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"5d472237a4a760575df3fee270dcc3d63fb4668fb943ef60f9c90433da4a1f38"}
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.667432 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:03 crc kubenswrapper[4838]: E1128 10:00:03.667497 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:04.16748287 +0000 UTC m=+175.866457030 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.667833 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tdpxt\" (UniqueName: \"kubernetes.io/projected/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-kube-api-access-tdpxt\") pod \"console-f9d7485db-lvtzk\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " pod="openshift-console/console-f9d7485db-lvtzk"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.667879 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/da520ae0-3472-44e1-af12-5e1fcdfec000-images\") pod \"machine-config-operator-74547568cd-pnngr\" (UID: \"da520ae0-3472-44e1-af12-5e1fcdfec000\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pnngr"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.667897 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vb6p\" (UniqueName: \"kubernetes.io/projected/9f72bc48-a623-4365-8dfb-bbf5b0179798-kube-api-access-8vb6p\") pod \"openshift-controller-manager-operator-756b6f6bc6-nm4jv\" (UID: \"9f72bc48-a623-4365-8dfb-bbf5b0179798\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nm4jv"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.667915 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/4752b167-aa81-4844-98ab-07cc66b152bd-srv-cert\") pod \"catalog-operator-68c6474976-ss9lp\" (UID: \"4752b167-aa81-4844-98ab-07cc66b152bd\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ss9lp"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.667931 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-node-pullsecrets\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.667947 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d97b2299-c955-4314-8b0a-a952e7bb53da-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lsj7g\" (UID: \"d97b2299-c955-4314-8b0a-a952e7bb53da\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lsj7g"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.667970 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/8a281de3-cc12-4dd1-b9be-0ca03a0613ec-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-xsjrl\" (UID: \"8a281de3-cc12-4dd1-b9be-0ca03a0613ec\") " pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.667984 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0ba0db9f-c351-49d0-a9c2-87b0edcfccff-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-kmvqb\" (UID: \"0ba0db9f-c351-49d0-a9c2-87b0edcfccff\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-kmvqb"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668001 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/502acc2d-a5e3-4240-b2fb-7f67b7518b82-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-vrzwk\" (UID: \"502acc2d-a5e3-4240-b2fb-7f67b7518b82\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vrzwk"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668017 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-console-config\") pod \"console-f9d7485db-lvtzk\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " pod="openshift-console/console-f9d7485db-lvtzk"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668046 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bae52de6-77be-47d7-a5fe-8e22e5f24bf4-config\") pod \"kube-apiserver-operator-766d6c64bb-9mrm4\" (UID: \"bae52de6-77be-47d7-a5fe-8e22e5f24bf4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9mrm4"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668065 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpldx\" (UniqueName: \"kubernetes.io/projected/e81eda87-1848-48b9-8a4d-6c1c184cb421-kube-api-access-wpldx\") pod \"machine-config-controller-84d6567774-x4rrn\" (UID: \"e81eda87-1848-48b9-8a4d-6c1c184cb421\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x4rrn"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668079 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gkkv\" (UniqueName: \"kubernetes.io/projected/0ba0db9f-c351-49d0-a9c2-87b0edcfccff-kube-api-access-5gkkv\") pod \"multus-admission-controller-857f4d67dd-kmvqb\" (UID: \"0ba0db9f-c351-49d0-a9c2-87b0edcfccff\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-kmvqb"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668093 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00e915fe-78b7-4c8f-bc2b-1357c1beeee3-config\") pod \"service-ca-operator-777779d784-qlvp5\" (UID: \"00e915fe-78b7-4c8f-bc2b-1357c1beeee3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qlvp5"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668111 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6a71181f-49a4-4b69-a3e6-2413929b81dc-installation-pull-secrets\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668128 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6a71181f-49a4-4b69-a3e6-2413929b81dc-bound-sa-token\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668144 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e3505c3b-461c-489c-8af1-117b3cbc433b-service-ca-bundle\") pod \"router-default-5444994796-zmc4g\" (UID: \"e3505c3b-461c-489c-8af1-117b3cbc433b\") " pod="openshift-ingress/router-default-5444994796-zmc4g"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668159 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bae52de6-77be-47d7-a5fe-8e22e5f24bf4-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-9mrm4\" (UID: \"bae52de6-77be-47d7-a5fe-8e22e5f24bf4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9mrm4"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668183 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-config\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668199 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/da520ae0-3472-44e1-af12-5e1fcdfec000-auth-proxy-config\") pod \"machine-config-operator-74547568cd-pnngr\" (UID: \"da520ae0-3472-44e1-af12-5e1fcdfec000\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pnngr"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668245 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6a71181f-49a4-4b69-a3e6-2413929b81dc-trusted-ca\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668279 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/89ca4c63-f97b-4a71-8b2e-613170fdde6b-config-volume\") pod \"collect-profiles-29405400-6d4qp\" (UID: \"89ca4c63-f97b-4a71-8b2e-613170fdde6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668307 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-oauth-serving-cert\") pod \"console-f9d7485db-lvtzk\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " pod="openshift-console/console-f9d7485db-lvtzk"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668322 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/4752b167-aa81-4844-98ab-07cc66b152bd-profile-collector-cert\") pod \"catalog-operator-68c6474976-ss9lp\" (UID: \"4752b167-aa81-4844-98ab-07cc66b152bd\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ss9lp"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668337 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/a8af0096-480e-4c84-8da1-e6ff489addd3-certs\") pod \"machine-config-server-8q49c\" (UID: \"a8af0096-480e-4c84-8da1-e6ff489addd3\") " pod="openshift-machine-config-operator/machine-config-server-8q49c"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668364 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/39d8a22f-d5ef-427b-aae0-0e8faad08f6e-config-volume\") pod \"dns-default-9pfms\" (UID: \"39d8a22f-d5ef-427b-aae0-0e8faad08f6e\") " pod="openshift-dns/dns-default-9pfms"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668383 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6a71181f-49a4-4b69-a3e6-2413929b81dc-registry-certificates\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668401 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-trusted-ca-bundle\") pod \"console-f9d7485db-lvtzk\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " pod="openshift-console/console-f9d7485db-lvtzk"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668417 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e3505c3b-461c-489c-8af1-117b3cbc433b-metrics-certs\") pod \"router-default-5444994796-zmc4g\" (UID: \"e3505c3b-461c-489c-8af1-117b3cbc433b\") " pod="openshift-ingress/router-default-5444994796-zmc4g"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668433 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/a8af0096-480e-4c84-8da1-e6ff489addd3-node-bootstrap-token\") pod \"machine-config-server-8q49c\" (UID: \"a8af0096-480e-4c84-8da1-e6ff489addd3\") " pod="openshift-machine-config-operator/machine-config-server-8q49c"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668448 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjhff\" (UniqueName: \"kubernetes.io/projected/16dead36-b11f-4dec-ad63-a25840480761-kube-api-access-fjhff\") pod \"ingress-canary-f4z4k\" (UID: \"16dead36-b11f-4dec-ad63-a25840480761\") " pod="openshift-ingress-canary/ingress-canary-f4z4k"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668479 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9f72bc48-a623-4365-8dfb-bbf5b0179798-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-nm4jv\" (UID: \"9f72bc48-a623-4365-8dfb-bbf5b0179798\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nm4jv"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668499 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msngm\" (UniqueName: \"kubernetes.io/projected/d556750d-84c7-4fdc-8080-33cf240704df-kube-api-access-msngm\") pod \"service-ca-9c57cc56f-k2xhp\" (UID: \"d556750d-84c7-4fdc-8080-33cf240704df\") " pod="openshift-service-ca/service-ca-9c57cc56f-k2xhp"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668554 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bae52de6-77be-47d7-a5fe-8e22e5f24bf4-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-9mrm4\" (UID: \"bae52de6-77be-47d7-a5fe-8e22e5f24bf4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9mrm4"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668584 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/39d8a22f-d5ef-427b-aae0-0e8faad08f6e-metrics-tls\") pod \"dns-default-9pfms\" (UID: \"39d8a22f-d5ef-427b-aae0-0e8faad08f6e\") " pod="openshift-dns/dns-default-9pfms"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668612 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-audit-dir\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668633 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/d556750d-84c7-4fdc-8080-33cf240704df-signing-key\") pod \"service-ca-9c57cc56f-k2xhp\" (UID: \"d556750d-84c7-4fdc-8080-33cf240704df\") " pod="openshift-service-ca/service-ca-9c57cc56f-k2xhp"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668665 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e81eda87-1848-48b9-8a4d-6c1c184cb421-proxy-tls\") pod \"machine-config-controller-84d6567774-x4rrn\" (UID: \"e81eda87-1848-48b9-8a4d-6c1c184cb421\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x4rrn"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668696 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/d556750d-84c7-4fdc-8080-33cf240704df-signing-cabundle\") pod \"service-ca-9c57cc56f-k2xhp\" (UID: \"d556750d-84c7-4fdc-8080-33cf240704df\") " pod="openshift-service-ca/service-ca-9c57cc56f-k2xhp"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668711 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/bfd87dd0-c7b3-4e3c-940c-0229718e3579-profile-collector-cert\") pod \"olm-operator-6b444d44fb-7vsn9\" (UID: \"bfd87dd0-c7b3-4e3c-940c-0229718e3579\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7vsn9"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668756 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-console-oauth-config\") pod \"console-f9d7485db-lvtzk\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " pod="openshift-console/console-f9d7485db-lvtzk"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668779 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-service-ca\") pod \"console-f9d7485db-lvtzk\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " pod="openshift-console/console-f9d7485db-lvtzk"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668794 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d97b2299-c955-4314-8b0a-a952e7bb53da-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lsj7g\" (UID: \"d97b2299-c955-4314-8b0a-a952e7bb53da\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lsj7g"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668808 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00e915fe-78b7-4c8f-bc2b-1357c1beeee3-serving-cert\") pod \"service-ca-operator-777779d784-qlvp5\" (UID: \"00e915fe-78b7-4c8f-bc2b-1357c1beeee3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qlvp5"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668822 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2csw\" (UniqueName: \"kubernetes.io/projected/da520ae0-3472-44e1-af12-5e1fcdfec000-kube-api-access-r2csw\") pod \"machine-config-operator-74547568cd-pnngr\" (UID: \"da520ae0-3472-44e1-af12-5e1fcdfec000\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pnngr"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668838 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-serving-cert\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668854 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/89ca4c63-f97b-4a71-8b2e-613170fdde6b-secret-volume\") pod \"collect-profiles-29405400-6d4qp\" (UID: \"89ca4c63-f97b-4a71-8b2e-613170fdde6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668879 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5h4t8\" (UniqueName: \"kubernetes.io/projected/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-kube-api-access-5h4t8\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668895 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/f16a2d31-72dd-4ec0-b516-a3d10b0b9aed-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-zn7tf\" (UID: \"f16a2d31-72dd-4ec0-b516-a3d10b0b9aed\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-zn7tf"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.668912 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkpgr\" (UniqueName: \"kubernetes.io/projected/152c1627-347b-483f-aa32-a65e67de0e5e-kube-api-access-qkpgr\") pod \"packageserver-d55dfcdfc-7l5mk\" (UID: \"152c1627-347b-483f-aa32-a65e67de0e5e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.669306 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/152c1627-347b-483f-aa32-a65e67de0e5e-tmpfs\") pod \"packageserver-d55dfcdfc-7l5mk\" (UID: \"152c1627-347b-483f-aa32-a65e67de0e5e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.669339 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sf77v\" (UniqueName: \"kubernetes.io/projected/89ca4c63-f97b-4a71-8b2e-613170fdde6b-kube-api-access-sf77v\") pod \"collect-profiles-29405400-6d4qp\" (UID: \"89ca4c63-f97b-4a71-8b2e-613170fdde6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.669372 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2xxm\" (UniqueName: \"kubernetes.io/projected/bfd87dd0-c7b3-4e3c-940c-0229718e3579-kube-api-access-b2xxm\") pod \"olm-operator-6b444d44fb-7vsn9\" (UID: \"bfd87dd0-c7b3-4e3c-940c-0229718e3579\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7vsn9"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.669424 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-image-import-ca\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.669439 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-trusted-ca-bundle\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.669455 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d97b2299-c955-4314-8b0a-a952e7bb53da-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lsj7g\" (UID: \"d97b2299-c955-4314-8b0a-a952e7bb53da\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lsj7g"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.669479 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-console-serving-cert\") pod \"console-f9d7485db-lvtzk\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " pod="openshift-console/console-f9d7485db-lvtzk"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.670211 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-node-pullsecrets\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.671858 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-console-config\") pod \"console-f9d7485db-lvtzk\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " pod="openshift-console/console-f9d7485db-lvtzk"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.672019 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-config\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.672047 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e81eda87-1848-48b9-8a4d-6c1c184cb421-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-x4rrn\" (UID: \"e81eda87-1848-48b9-8a4d-6c1c184cb421\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x4rrn"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.672131 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8a281de3-cc12-4dd1-b9be-0ca03a0613ec-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-xsjrl\" (UID: \"8a281de3-cc12-4dd1-b9be-0ca03a0613ec\") " pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.672169 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-audit\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.672199 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-encryption-config\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.672254 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gp4bv\" (UniqueName: \"kubernetes.io/projected/e3505c3b-461c-489c-8af1-117b3cbc433b-kube-api-access-gp4bv\") pod \"router-default-5444994796-zmc4g\" (UID: \"e3505c3b-461c-489c-8af1-117b3cbc433b\") " pod="openshift-ingress/router-default-5444994796-zmc4g"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.672289
4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t4qwl\" (UniqueName: \"kubernetes.io/projected/39d8a22f-d5ef-427b-aae0-0e8faad08f6e-kube-api-access-t4qwl\") pod \"dns-default-9pfms\" (UID: \"39d8a22f-d5ef-427b-aae0-0e8faad08f6e\") " pod="openshift-dns/dns-default-9pfms" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.672329 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-trusted-ca-bundle\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.672662 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-etcd-client\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.672704 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.673139 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6a71181f-49a4-4b69-a3e6-2413929b81dc-trusted-ca\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.673202 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6a71181f-49a4-4b69-a3e6-2413929b81dc-registry-certificates\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.673767 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-image-import-ca\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.673865 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-audit\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.674369 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-audit-dir\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:03 crc 
kubenswrapper[4838]: I1128 10:00:03.674507 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d97b2299-c955-4314-8b0a-a952e7bb53da-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lsj7g\" (UID: \"d97b2299-c955-4314-8b0a-a952e7bb53da\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lsj7g"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.674507 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-oauth-serving-cert\") pod \"console-f9d7485db-lvtzk\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " pod="openshift-console/console-f9d7485db-lvtzk"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.674581 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-trusted-ca-bundle\") pod \"console-f9d7485db-lvtzk\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " pod="openshift-console/console-f9d7485db-lvtzk"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.676202 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpnzx\" (UniqueName: \"kubernetes.io/projected/f16a2d31-72dd-4ec0-b516-a3d10b0b9aed-kube-api-access-kpnzx\") pod \"package-server-manager-789f6589d5-zn7tf\" (UID: \"f16a2d31-72dd-4ec0-b516-a3d10b0b9aed\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-zn7tf"
Nov 28 10:00:03 crc kubenswrapper[4838]: E1128 10:00:03.676509 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:04.176495216 +0000 UTC m=+175.875469376 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.676655 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-etcd-serving-ca\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.676816 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/152c1627-347b-483f-aa32-a65e67de0e5e-webhook-cert\") pod \"packageserver-d55dfcdfc-7l5mk\" (UID: \"152c1627-347b-483f-aa32-a65e67de0e5e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.676839 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8p4m\" (UniqueName: \"kubernetes.io/projected/4752b167-aa81-4844-98ab-07cc66b152bd-kube-api-access-z8p4m\") pod \"catalog-operator-68c6474976-ss9lp\" (UID: \"4752b167-aa81-4844-98ab-07cc66b152bd\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ss9lp"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.676921 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/152c1627-347b-483f-aa32-a65e67de0e5e-apiservice-cert\") pod \"packageserver-d55dfcdfc-7l5mk\" (UID: \"152c1627-347b-483f-aa32-a65e67de0e5e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.676940 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/16dead36-b11f-4dec-ad63-a25840480761-cert\") pod \"ingress-canary-f4z4k\" (UID: \"16dead36-b11f-4dec-ad63-a25840480761\") " pod="openshift-ingress-canary/ingress-canary-f4z4k"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.676957 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/da520ae0-3472-44e1-af12-5e1fcdfec000-proxy-tls\") pod \"machine-config-operator-74547568cd-pnngr\" (UID: \"da520ae0-3472-44e1-af12-5e1fcdfec000\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pnngr"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.677002 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mbpr7\" (UniqueName: \"kubernetes.io/projected/8a281de3-cc12-4dd1-b9be-0ca03a0613ec-kube-api-access-mbpr7\") pod \"marketplace-operator-79b997595-xsjrl\" (UID: \"8a281de3-cc12-4dd1-b9be-0ca03a0613ec\") " pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.677020 4838 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cbcfj\" (UniqueName: \"kubernetes.io/projected/a8af0096-480e-4c84-8da1-e6ff489addd3-kube-api-access-cbcfj\") pod \"machine-config-server-8q49c\" (UID: \"a8af0096-480e-4c84-8da1-e6ff489addd3\") " pod="openshift-machine-config-operator/machine-config-server-8q49c" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.677377 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-etcd-serving-ca\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.677573 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-bs85m"] Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.677748 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/e3505c3b-461c-489c-8af1-117b3cbc433b-default-certificate\") pod \"router-default-5444994796-zmc4g\" (UID: \"e3505c3b-461c-489c-8af1-117b3cbc433b\") " pod="openshift-ingress/router-default-5444994796-zmc4g" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.677770 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7fff\" (UniqueName: \"kubernetes.io/projected/00e915fe-78b7-4c8f-bc2b-1357c1beeee3-kube-api-access-g7fff\") pod \"service-ca-operator-777779d784-qlvp5\" (UID: \"00e915fe-78b7-4c8f-bc2b-1357c1beeee3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qlvp5" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.677811 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6a71181f-49a4-4b69-a3e6-2413929b81dc-registry-tls\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.677850 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f72bc48-a623-4365-8dfb-bbf5b0179798-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-nm4jv\" (UID: \"9f72bc48-a623-4365-8dfb-bbf5b0179798\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nm4jv" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.678606 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfc7g\" (UniqueName: \"kubernetes.io/projected/502acc2d-a5e3-4240-b2fb-7f67b7518b82-kube-api-access-tfc7g\") pod \"control-plane-machine-set-operator-78cbb6b69f-vrzwk\" (UID: \"502acc2d-a5e3-4240-b2fb-7f67b7518b82\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vrzwk" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.678663 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f72bc48-a623-4365-8dfb-bbf5b0179798-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-nm4jv\" (UID: \"9f72bc48-a623-4365-8dfb-bbf5b0179798\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nm4jv" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.678664 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6a71181f-49a4-4b69-a3e6-2413929b81dc-ca-trust-extracted\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.678726 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hqg6v\" (UniqueName: \"kubernetes.io/projected/6a71181f-49a4-4b69-a3e6-2413929b81dc-kube-api-access-hqg6v\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.678762 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/e3505c3b-461c-489c-8af1-117b3cbc433b-stats-auth\") pod \"router-default-5444994796-zmc4g\" (UID: \"e3505c3b-461c-489c-8af1-117b3cbc433b\") " pod="openshift-ingress/router-default-5444994796-zmc4g" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.678779 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/bfd87dd0-c7b3-4e3c-940c-0229718e3579-srv-cert\") pod \"olm-operator-6b444d44fb-7vsn9\" (UID: \"bfd87dd0-c7b3-4e3c-940c-0229718e3579\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7vsn9" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.679010 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6a71181f-49a4-4b69-a3e6-2413929b81dc-ca-trust-extracted\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.682368 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6a71181f-49a4-4b69-a3e6-2413929b81dc-installation-pull-secrets\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.682672 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9f72bc48-a623-4365-8dfb-bbf5b0179798-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-nm4jv\" (UID: \"9f72bc48-a623-4365-8dfb-bbf5b0179798\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nm4jv" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.684009 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-console-oauth-config\") pod \"console-f9d7485db-lvtzk\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " pod="openshift-console/console-f9d7485db-lvtzk" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.686197 4838 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-serving-cert\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.686500 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d97b2299-c955-4314-8b0a-a952e7bb53da-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lsj7g\" (UID: \"d97b2299-c955-4314-8b0a-a952e7bb53da\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lsj7g" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.686558 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-console-serving-cert\") pod \"console-f9d7485db-lvtzk\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " pod="openshift-console/console-f9d7485db-lvtzk" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.688352 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-encryption-config\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.689182 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6a71181f-49a4-4b69-a3e6-2413929b81dc-registry-tls\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.690515 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-etcd-client\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.707688 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tdpxt\" (UniqueName: \"kubernetes.io/projected/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-kube-api-access-tdpxt\") pod \"console-f9d7485db-lvtzk\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " pod="openshift-console/console-f9d7485db-lvtzk" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.747065 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d97b2299-c955-4314-8b0a-a952e7bb53da-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lsj7g\" (UID: \"d97b2299-c955-4314-8b0a-a952e7bb53da\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lsj7g" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.766276 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6a71181f-49a4-4b69-a3e6-2413929b81dc-bound-sa-token\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:03 crc 
kubenswrapper[4838]: I1128 10:00:03.779631 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:03 crc kubenswrapper[4838]: E1128 10:00:03.779739 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:04.279705458 +0000 UTC m=+175.978679628 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.779877 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/bfd87dd0-c7b3-4e3c-940c-0229718e3579-profile-collector-cert\") pod \"olm-operator-6b444d44fb-7vsn9\" (UID: \"bfd87dd0-c7b3-4e3c-940c-0229718e3579\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7vsn9"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.779900 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/35b6ed6b-3088-4a12-8bd6-f8def1138e85-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-lng7h\" (UID: \"35b6ed6b-3088-4a12-8bd6-f8def1138e85\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lng7h"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.779937 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00e915fe-78b7-4c8f-bc2b-1357c1beeee3-serving-cert\") pod \"service-ca-operator-777779d784-qlvp5\" (UID: \"00e915fe-78b7-4c8f-bc2b-1357c1beeee3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qlvp5"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.779955 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2csw\" (UniqueName: \"kubernetes.io/projected/da520ae0-3472-44e1-af12-5e1fcdfec000-kube-api-access-r2csw\") pod \"machine-config-operator-74547568cd-pnngr\" (UID: \"da520ae0-3472-44e1-af12-5e1fcdfec000\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pnngr"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.779989 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/89ca4c63-f97b-4a71-8b2e-613170fdde6b-secret-volume\") pod \"collect-profiles-29405400-6d4qp\" (UID: \"89ca4c63-f97b-4a71-8b2e-613170fdde6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780007 4838 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35b6ed6b-3088-4a12-8bd6-f8def1138e85-config\") pod \"kube-controller-manager-operator-78b949d7b-lng7h\" (UID: \"35b6ed6b-3088-4a12-8bd6-f8def1138e85\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lng7h" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780031 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/f16a2d31-72dd-4ec0-b516-a3d10b0b9aed-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-zn7tf\" (UID: \"f16a2d31-72dd-4ec0-b516-a3d10b0b9aed\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-zn7tf" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780046 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkpgr\" (UniqueName: \"kubernetes.io/projected/152c1627-347b-483f-aa32-a65e67de0e5e-kube-api-access-qkpgr\") pod \"packageserver-d55dfcdfc-7l5mk\" (UID: \"152c1627-347b-483f-aa32-a65e67de0e5e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780062 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/152c1627-347b-483f-aa32-a65e67de0e5e-tmpfs\") pod \"packageserver-d55dfcdfc-7l5mk\" (UID: \"152c1627-347b-483f-aa32-a65e67de0e5e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780080 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sf77v\" (UniqueName: \"kubernetes.io/projected/89ca4c63-f97b-4a71-8b2e-613170fdde6b-kube-api-access-sf77v\") pod \"collect-profiles-29405400-6d4qp\" (UID: \"89ca4c63-f97b-4a71-8b2e-613170fdde6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780097 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2xxm\" (UniqueName: \"kubernetes.io/projected/bfd87dd0-c7b3-4e3c-940c-0229718e3579-kube-api-access-b2xxm\") pod \"olm-operator-6b444d44fb-7vsn9\" (UID: \"bfd87dd0-c7b3-4e3c-940c-0229718e3579\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7vsn9" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780114 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/02ae83a0-06cd-46f5-a5de-3127a57b8d5c-plugins-dir\") pod \"csi-hostpathplugin-zj778\" (UID: \"02ae83a0-06cd-46f5-a5de-3127a57b8d5c\") " pod="hostpath-provisioner/csi-hostpathplugin-zj778" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780134 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzjvn\" (UniqueName: \"kubernetes.io/projected/9d2e0aff-7ecb-4126-add3-57c7a24e781c-kube-api-access-hzjvn\") pod \"migrator-59844c95c7-5xzrq\" (UID: \"9d2e0aff-7ecb-4126-add3-57c7a24e781c\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5xzrq" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780155 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e81eda87-1848-48b9-8a4d-6c1c184cb421-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-x4rrn\" (UID: \"e81eda87-1848-48b9-8a4d-6c1c184cb421\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x4rrn" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780173 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8a281de3-cc12-4dd1-b9be-0ca03a0613ec-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-xsjrl\" (UID: \"8a281de3-cc12-4dd1-b9be-0ca03a0613ec\") " pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780190 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gp4bv\" (UniqueName: \"kubernetes.io/projected/e3505c3b-461c-489c-8af1-117b3cbc433b-kube-api-access-gp4bv\") pod \"router-default-5444994796-zmc4g\" (UID: \"e3505c3b-461c-489c-8af1-117b3cbc433b\") " pod="openshift-ingress/router-default-5444994796-zmc4g" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780206 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t4qwl\" (UniqueName: \"kubernetes.io/projected/39d8a22f-d5ef-427b-aae0-0e8faad08f6e-kube-api-access-t4qwl\") pod \"dns-default-9pfms\" (UID: \"39d8a22f-d5ef-427b-aae0-0e8faad08f6e\") " pod="openshift-dns/dns-default-9pfms" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780222 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/02ae83a0-06cd-46f5-a5de-3127a57b8d5c-csi-data-dir\") pod \"csi-hostpathplugin-zj778\" (UID: \"02ae83a0-06cd-46f5-a5de-3127a57b8d5c\") " pod="hostpath-provisioner/csi-hostpathplugin-zj778" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780243 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780258 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpnzx\" (UniqueName: \"kubernetes.io/projected/f16a2d31-72dd-4ec0-b516-a3d10b0b9aed-kube-api-access-kpnzx\") pod \"package-server-manager-789f6589d5-zn7tf\" (UID: \"f16a2d31-72dd-4ec0-b516-a3d10b0b9aed\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-zn7tf" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780274 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/152c1627-347b-483f-aa32-a65e67de0e5e-webhook-cert\") pod \"packageserver-d55dfcdfc-7l5mk\" (UID: \"152c1627-347b-483f-aa32-a65e67de0e5e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780290 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zg84s\" (UniqueName: 
\"kubernetes.io/projected/02ae83a0-06cd-46f5-a5de-3127a57b8d5c-kube-api-access-zg84s\") pod \"csi-hostpathplugin-zj778\" (UID: \"02ae83a0-06cd-46f5-a5de-3127a57b8d5c\") " pod="hostpath-provisioner/csi-hostpathplugin-zj778" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780307 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8p4m\" (UniqueName: \"kubernetes.io/projected/4752b167-aa81-4844-98ab-07cc66b152bd-kube-api-access-z8p4m\") pod \"catalog-operator-68c6474976-ss9lp\" (UID: \"4752b167-aa81-4844-98ab-07cc66b152bd\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ss9lp" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780324 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/152c1627-347b-483f-aa32-a65e67de0e5e-apiservice-cert\") pod \"packageserver-d55dfcdfc-7l5mk\" (UID: \"152c1627-347b-483f-aa32-a65e67de0e5e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780386 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/16dead36-b11f-4dec-ad63-a25840480761-cert\") pod \"ingress-canary-f4z4k\" (UID: \"16dead36-b11f-4dec-ad63-a25840480761\") " pod="openshift-ingress-canary/ingress-canary-f4z4k" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780402 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/da520ae0-3472-44e1-af12-5e1fcdfec000-proxy-tls\") pod \"machine-config-operator-74547568cd-pnngr\" (UID: \"da520ae0-3472-44e1-af12-5e1fcdfec000\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pnngr" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780441 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/02ae83a0-06cd-46f5-a5de-3127a57b8d5c-socket-dir\") pod \"csi-hostpathplugin-zj778\" (UID: \"02ae83a0-06cd-46f5-a5de-3127a57b8d5c\") " pod="hostpath-provisioner/csi-hostpathplugin-zj778" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780458 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6316f735-c6d6-4b80-95be-c548707cd386-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-fh5z9\" (UID: \"6316f735-c6d6-4b80-95be-c548707cd386\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fh5z9" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780475 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mbpr7\" (UniqueName: \"kubernetes.io/projected/8a281de3-cc12-4dd1-b9be-0ca03a0613ec-kube-api-access-mbpr7\") pod \"marketplace-operator-79b997595-xsjrl\" (UID: \"8a281de3-cc12-4dd1-b9be-0ca03a0613ec\") " pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780493 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cbcfj\" (UniqueName: \"kubernetes.io/projected/a8af0096-480e-4c84-8da1-e6ff489addd3-kube-api-access-cbcfj\") pod \"machine-config-server-8q49c\" (UID: \"a8af0096-480e-4c84-8da1-e6ff489addd3\") " 
pod="openshift-machine-config-operator/machine-config-server-8q49c" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780510 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/e3505c3b-461c-489c-8af1-117b3cbc433b-default-certificate\") pod \"router-default-5444994796-zmc4g\" (UID: \"e3505c3b-461c-489c-8af1-117b3cbc433b\") " pod="openshift-ingress/router-default-5444994796-zmc4g" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780527 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7fff\" (UniqueName: \"kubernetes.io/projected/00e915fe-78b7-4c8f-bc2b-1357c1beeee3-kube-api-access-g7fff\") pod \"service-ca-operator-777779d784-qlvp5\" (UID: \"00e915fe-78b7-4c8f-bc2b-1357c1beeee3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qlvp5" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780548 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfc7g\" (UniqueName: \"kubernetes.io/projected/502acc2d-a5e3-4240-b2fb-7f67b7518b82-kube-api-access-tfc7g\") pod \"control-plane-machine-set-operator-78cbb6b69f-vrzwk\" (UID: \"502acc2d-a5e3-4240-b2fb-7f67b7518b82\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vrzwk" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780568 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/e3505c3b-461c-489c-8af1-117b3cbc433b-stats-auth\") pod \"router-default-5444994796-zmc4g\" (UID: \"e3505c3b-461c-489c-8af1-117b3cbc433b\") " pod="openshift-ingress/router-default-5444994796-zmc4g" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780583 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/bfd87dd0-c7b3-4e3c-940c-0229718e3579-srv-cert\") pod \"olm-operator-6b444d44fb-7vsn9\" (UID: \"bfd87dd0-c7b3-4e3c-940c-0229718e3579\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7vsn9" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780598 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/da520ae0-3472-44e1-af12-5e1fcdfec000-images\") pod \"machine-config-operator-74547568cd-pnngr\" (UID: \"da520ae0-3472-44e1-af12-5e1fcdfec000\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pnngr" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780627 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/4752b167-aa81-4844-98ab-07cc66b152bd-srv-cert\") pod \"catalog-operator-68c6474976-ss9lp\" (UID: \"4752b167-aa81-4844-98ab-07cc66b152bd\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ss9lp" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780654 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/8a281de3-cc12-4dd1-b9be-0ca03a0613ec-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-xsjrl\" (UID: \"8a281de3-cc12-4dd1-b9be-0ca03a0613ec\") " pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780671 4838 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0ba0db9f-c351-49d0-a9c2-87b0edcfccff-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-kmvqb\" (UID: \"0ba0db9f-c351-49d0-a9c2-87b0edcfccff\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-kmvqb" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780688 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/502acc2d-a5e3-4240-b2fb-7f67b7518b82-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-vrzwk\" (UID: \"502acc2d-a5e3-4240-b2fb-7f67b7518b82\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vrzwk" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780743 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bae52de6-77be-47d7-a5fe-8e22e5f24bf4-config\") pod \"kube-apiserver-operator-766d6c64bb-9mrm4\" (UID: \"bae52de6-77be-47d7-a5fe-8e22e5f24bf4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9mrm4" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780763 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/02ae83a0-06cd-46f5-a5de-3127a57b8d5c-registration-dir\") pod \"csi-hostpathplugin-zj778\" (UID: \"02ae83a0-06cd-46f5-a5de-3127a57b8d5c\") " pod="hostpath-provisioner/csi-hostpathplugin-zj778" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780780 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpldx\" (UniqueName: \"kubernetes.io/projected/e81eda87-1848-48b9-8a4d-6c1c184cb421-kube-api-access-wpldx\") pod \"machine-config-controller-84d6567774-x4rrn\" (UID: \"e81eda87-1848-48b9-8a4d-6c1c184cb421\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x4rrn" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780797 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5gkkv\" (UniqueName: \"kubernetes.io/projected/0ba0db9f-c351-49d0-a9c2-87b0edcfccff-kube-api-access-5gkkv\") pod \"multus-admission-controller-857f4d67dd-kmvqb\" (UID: \"0ba0db9f-c351-49d0-a9c2-87b0edcfccff\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-kmvqb" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780812 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00e915fe-78b7-4c8f-bc2b-1357c1beeee3-config\") pod \"service-ca-operator-777779d784-qlvp5\" (UID: \"00e915fe-78b7-4c8f-bc2b-1357c1beeee3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qlvp5" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780827 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e3505c3b-461c-489c-8af1-117b3cbc433b-service-ca-bundle\") pod \"router-default-5444994796-zmc4g\" (UID: \"e3505c3b-461c-489c-8af1-117b3cbc433b\") " pod="openshift-ingress/router-default-5444994796-zmc4g" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780842 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/bae52de6-77be-47d7-a5fe-8e22e5f24bf4-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-9mrm4\" (UID: \"bae52de6-77be-47d7-a5fe-8e22e5f24bf4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9mrm4" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.780882 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/35b6ed6b-3088-4a12-8bd6-f8def1138e85-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-lng7h\" (UID: \"35b6ed6b-3088-4a12-8bd6-f8def1138e85\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lng7h" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.781584 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/152c1627-347b-483f-aa32-a65e67de0e5e-tmpfs\") pod \"packageserver-d55dfcdfc-7l5mk\" (UID: \"152c1627-347b-483f-aa32-a65e67de0e5e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk" Nov 28 10:00:03 crc kubenswrapper[4838]: E1128 10:00:03.782891 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:04.282861406 +0000 UTC m=+175.981835616 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.783003 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bae52de6-77be-47d7-a5fe-8e22e5f24bf4-config\") pod \"kube-apiserver-operator-766d6c64bb-9mrm4\" (UID: \"bae52de6-77be-47d7-a5fe-8e22e5f24bf4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9mrm4" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.784114 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/bfd87dd0-c7b3-4e3c-940c-0229718e3579-profile-collector-cert\") pod \"olm-operator-6b444d44fb-7vsn9\" (UID: \"bfd87dd0-c7b3-4e3c-940c-0229718e3579\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7vsn9" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.784519 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/da520ae0-3472-44e1-af12-5e1fcdfec000-auth-proxy-config\") pod \"machine-config-operator-74547568cd-pnngr\" (UID: \"da520ae0-3472-44e1-af12-5e1fcdfec000\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pnngr" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.785199 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00e915fe-78b7-4c8f-bc2b-1357c1beeee3-config\") pod \"service-ca-operator-777779d784-qlvp5\" (UID: 
\"00e915fe-78b7-4c8f-bc2b-1357c1beeee3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qlvp5" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.785297 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0ba0db9f-c351-49d0-a9c2-87b0edcfccff-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-kmvqb\" (UID: \"0ba0db9f-c351-49d0-a9c2-87b0edcfccff\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-kmvqb" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.785667 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/152c1627-347b-483f-aa32-a65e67de0e5e-apiservice-cert\") pod \"packageserver-d55dfcdfc-7l5mk\" (UID: \"152c1627-347b-483f-aa32-a65e67de0e5e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.786122 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/8a281de3-cc12-4dd1-b9be-0ca03a0613ec-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-xsjrl\" (UID: \"8a281de3-cc12-4dd1-b9be-0ca03a0613ec\") " pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.786144 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e3505c3b-461c-489c-8af1-117b3cbc433b-service-ca-bundle\") pod \"router-default-5444994796-zmc4g\" (UID: \"e3505c3b-461c-489c-8af1-117b3cbc433b\") " pod="openshift-ingress/router-default-5444994796-zmc4g" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.786406 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/89ca4c63-f97b-4a71-8b2e-613170fdde6b-config-volume\") pod \"collect-profiles-29405400-6d4qp\" (UID: \"89ca4c63-f97b-4a71-8b2e-613170fdde6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.786735 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjlkq\" (UniqueName: \"kubernetes.io/projected/6316f735-c6d6-4b80-95be-c548707cd386-kube-api-access-gjlkq\") pod \"kube-storage-version-migrator-operator-b67b599dd-fh5z9\" (UID: \"6316f735-c6d6-4b80-95be-c548707cd386\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fh5z9" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.786787 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/4752b167-aa81-4844-98ab-07cc66b152bd-profile-collector-cert\") pod \"catalog-operator-68c6474976-ss9lp\" (UID: \"4752b167-aa81-4844-98ab-07cc66b152bd\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ss9lp" Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.786817 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/a8af0096-480e-4c84-8da1-e6ff489addd3-certs\") pod \"machine-config-server-8q49c\" (UID: \"a8af0096-480e-4c84-8da1-e6ff489addd3\") " pod="openshift-machine-config-operator/machine-config-server-8q49c" Nov 28 10:00:03 
crc kubenswrapper[4838]: I1128 10:00:03.787290 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e81eda87-1848-48b9-8a4d-6c1c184cb421-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-x4rrn\" (UID: \"e81eda87-1848-48b9-8a4d-6c1c184cb421\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x4rrn"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.787525 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/39d8a22f-d5ef-427b-aae0-0e8faad08f6e-config-volume\") pod \"dns-default-9pfms\" (UID: \"39d8a22f-d5ef-427b-aae0-0e8faad08f6e\") " pod="openshift-dns/dns-default-9pfms"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.787625 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/02ae83a0-06cd-46f5-a5de-3127a57b8d5c-mountpoint-dir\") pod \"csi-hostpathplugin-zj778\" (UID: \"02ae83a0-06cd-46f5-a5de-3127a57b8d5c\") " pod="hostpath-provisioner/csi-hostpathplugin-zj778"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.787688 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e3505c3b-461c-489c-8af1-117b3cbc433b-metrics-certs\") pod \"router-default-5444994796-zmc4g\" (UID: \"e3505c3b-461c-489c-8af1-117b3cbc433b\") " pod="openshift-ingress/router-default-5444994796-zmc4g"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.787627 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5h4t8\" (UniqueName: \"kubernetes.io/projected/d91a5fa7-8c8e-455a-a237-e0ec0baa2197-kube-api-access-5h4t8\") pod \"apiserver-76f77b778f-bs82t\" (UID: \"d91a5fa7-8c8e-455a-a237-e0ec0baa2197\") " pod="openshift-apiserver/apiserver-76f77b778f-bs82t"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.787769 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/a8af0096-480e-4c84-8da1-e6ff489addd3-node-bootstrap-token\") pod \"machine-config-server-8q49c\" (UID: \"a8af0096-480e-4c84-8da1-e6ff489addd3\") " pod="openshift-machine-config-operator/machine-config-server-8q49c"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.787825 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjhff\" (UniqueName: \"kubernetes.io/projected/16dead36-b11f-4dec-ad63-a25840480761-kube-api-access-fjhff\") pod \"ingress-canary-f4z4k\" (UID: \"16dead36-b11f-4dec-ad63-a25840480761\") " pod="openshift-ingress-canary/ingress-canary-f4z4k"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.787880 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msngm\" (UniqueName: \"kubernetes.io/projected/d556750d-84c7-4fdc-8080-33cf240704df-kube-api-access-msngm\") pod \"service-ca-9c57cc56f-k2xhp\" (UID: \"d556750d-84c7-4fdc-8080-33cf240704df\") " pod="openshift-service-ca/service-ca-9c57cc56f-k2xhp"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.788010 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/39d8a22f-d5ef-427b-aae0-0e8faad08f6e-config-volume\") pod \"dns-default-9pfms\" (UID: \"39d8a22f-d5ef-427b-aae0-0e8faad08f6e\") " pod="openshift-dns/dns-default-9pfms"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.788049 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6316f735-c6d6-4b80-95be-c548707cd386-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-fh5z9\" (UID: \"6316f735-c6d6-4b80-95be-c548707cd386\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fh5z9"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.788125 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8a281de3-cc12-4dd1-b9be-0ca03a0613ec-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-xsjrl\" (UID: \"8a281de3-cc12-4dd1-b9be-0ca03a0613ec\") " pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.788141 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bae52de6-77be-47d7-a5fe-8e22e5f24bf4-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-9mrm4\" (UID: \"bae52de6-77be-47d7-a5fe-8e22e5f24bf4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9mrm4"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.788240 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/39d8a22f-d5ef-427b-aae0-0e8faad08f6e-metrics-tls\") pod \"dns-default-9pfms\" (UID: \"39d8a22f-d5ef-427b-aae0-0e8faad08f6e\") " pod="openshift-dns/dns-default-9pfms"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.788343 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/d556750d-84c7-4fdc-8080-33cf240704df-signing-key\") pod \"service-ca-9c57cc56f-k2xhp\" (UID: \"d556750d-84c7-4fdc-8080-33cf240704df\") " pod="openshift-service-ca/service-ca-9c57cc56f-k2xhp"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.788395 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e81eda87-1848-48b9-8a4d-6c1c184cb421-proxy-tls\") pod \"machine-config-controller-84d6567774-x4rrn\" (UID: \"e81eda87-1848-48b9-8a4d-6c1c184cb421\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x4rrn"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.788443 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/d556750d-84c7-4fdc-8080-33cf240704df-signing-cabundle\") pod \"service-ca-9c57cc56f-k2xhp\" (UID: \"d556750d-84c7-4fdc-8080-33cf240704df\") " pod="openshift-service-ca/service-ca-9c57cc56f-k2xhp"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.789663 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/bfd87dd0-c7b3-4e3c-940c-0229718e3579-srv-cert\") pod \"olm-operator-6b444d44fb-7vsn9\" (UID: \"bfd87dd0-c7b3-4e3c-940c-0229718e3579\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7vsn9"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.789930 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/f16a2d31-72dd-4ec0-b516-a3d10b0b9aed-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-zn7tf\" (UID: \"f16a2d31-72dd-4ec0-b516-a3d10b0b9aed\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-zn7tf"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.790257 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/a8af0096-480e-4c84-8da1-e6ff489addd3-certs\") pod \"machine-config-server-8q49c\" (UID: \"a8af0096-480e-4c84-8da1-e6ff489addd3\") " pod="openshift-machine-config-operator/machine-config-server-8q49c"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.790537 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/d556750d-84c7-4fdc-8080-33cf240704df-signing-cabundle\") pod \"service-ca-9c57cc56f-k2xhp\" (UID: \"d556750d-84c7-4fdc-8080-33cf240704df\") " pod="openshift-service-ca/service-ca-9c57cc56f-k2xhp"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.790844 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/4752b167-aa81-4844-98ab-07cc66b152bd-profile-collector-cert\") pod \"catalog-operator-68c6474976-ss9lp\" (UID: \"4752b167-aa81-4844-98ab-07cc66b152bd\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ss9lp"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.791417 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/4752b167-aa81-4844-98ab-07cc66b152bd-srv-cert\") pod \"catalog-operator-68c6474976-ss9lp\" (UID: \"4752b167-aa81-4844-98ab-07cc66b152bd\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ss9lp"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.791482 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00e915fe-78b7-4c8f-bc2b-1357c1beeee3-serving-cert\") pod \"service-ca-operator-777779d784-qlvp5\" (UID: \"00e915fe-78b7-4c8f-bc2b-1357c1beeee3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qlvp5"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.791677 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bae52de6-77be-47d7-a5fe-8e22e5f24bf4-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-9mrm4\" (UID: \"bae52de6-77be-47d7-a5fe-8e22e5f24bf4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9mrm4"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.791855 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/a8af0096-480e-4c84-8da1-e6ff489addd3-node-bootstrap-token\") pod \"machine-config-server-8q49c\" (UID: \"a8af0096-480e-4c84-8da1-e6ff489addd3\") " pod="openshift-machine-config-operator/machine-config-server-8q49c"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.792408 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e81eda87-1848-48b9-8a4d-6c1c184cb421-proxy-tls\") pod \"machine-config-controller-84d6567774-x4rrn\" (UID: \"e81eda87-1848-48b9-8a4d-6c1c184cb421\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x4rrn"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.793229 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/16dead36-b11f-4dec-ad63-a25840480761-cert\") pod \"ingress-canary-f4z4k\" (UID: \"16dead36-b11f-4dec-ad63-a25840480761\") " pod="openshift-ingress-canary/ingress-canary-f4z4k"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.806007 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqg6v\" (UniqueName: \"kubernetes.io/projected/6a71181f-49a4-4b69-a3e6-2413929b81dc-kube-api-access-hqg6v\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.830119 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lsj7g"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.876186 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq"]
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.880351 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-bs82t"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.883328 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2xxm\" (UniqueName: \"kubernetes.io/projected/bfd87dd0-c7b3-4e3c-940c-0229718e3579-kube-api-access-b2xxm\") pod \"olm-operator-6b444d44fb-7vsn9\" (UID: \"bfd87dd0-c7b3-4e3c-940c-0229718e3579\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7vsn9"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.892110 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.892285 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6316f735-c6d6-4b80-95be-c548707cd386-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-fh5z9\" (UID: \"6316f735-c6d6-4b80-95be-c548707cd386\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fh5z9"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.892321 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/35b6ed6b-3088-4a12-8bd6-f8def1138e85-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-lng7h\" (UID: \"35b6ed6b-3088-4a12-8bd6-f8def1138e85\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lng7h"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.892357 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35b6ed6b-3088-4a12-8bd6-f8def1138e85-config\") pod \"kube-controller-manager-operator-78b949d7b-lng7h\" (UID: \"35b6ed6b-3088-4a12-8bd6-f8def1138e85\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lng7h"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.892385 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/02ae83a0-06cd-46f5-a5de-3127a57b8d5c-plugins-dir\") pod \"csi-hostpathplugin-zj778\" (UID: \"02ae83a0-06cd-46f5-a5de-3127a57b8d5c\") " pod="hostpath-provisioner/csi-hostpathplugin-zj778"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.892409 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzjvn\" (UniqueName: \"kubernetes.io/projected/9d2e0aff-7ecb-4126-add3-57c7a24e781c-kube-api-access-hzjvn\") pod \"migrator-59844c95c7-5xzrq\" (UID: \"9d2e0aff-7ecb-4126-add3-57c7a24e781c\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5xzrq"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.892442 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/02ae83a0-06cd-46f5-a5de-3127a57b8d5c-csi-data-dir\") pod \"csi-hostpathplugin-zj778\" (UID: \"02ae83a0-06cd-46f5-a5de-3127a57b8d5c\") " pod="hostpath-provisioner/csi-hostpathplugin-zj778"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.892475 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zg84s\" (UniqueName: \"kubernetes.io/projected/02ae83a0-06cd-46f5-a5de-3127a57b8d5c-kube-api-access-zg84s\") pod \"csi-hostpathplugin-zj778\" (UID: \"02ae83a0-06cd-46f5-a5de-3127a57b8d5c\") " pod="hostpath-provisioner/csi-hostpathplugin-zj778"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.892501 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/02ae83a0-06cd-46f5-a5de-3127a57b8d5c-socket-dir\") pod \"csi-hostpathplugin-zj778\" (UID: \"02ae83a0-06cd-46f5-a5de-3127a57b8d5c\") " pod="hostpath-provisioner/csi-hostpathplugin-zj778"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.892515 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6316f735-c6d6-4b80-95be-c548707cd386-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-fh5z9\" (UID: \"6316f735-c6d6-4b80-95be-c548707cd386\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fh5z9"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.892584 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/02ae83a0-06cd-46f5-a5de-3127a57b8d5c-registration-dir\") pod \"csi-hostpathplugin-zj778\" (UID: \"02ae83a0-06cd-46f5-a5de-3127a57b8d5c\") " pod="hostpath-provisioner/csi-hostpathplugin-zj778"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.892616 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/35b6ed6b-3088-4a12-8bd6-f8def1138e85-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-lng7h\" (UID: \"35b6ed6b-3088-4a12-8bd6-f8def1138e85\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lng7h"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.892659 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjlkq\" (UniqueName: \"kubernetes.io/projected/6316f735-c6d6-4b80-95be-c548707cd386-kube-api-access-gjlkq\") pod \"kube-storage-version-migrator-operator-b67b599dd-fh5z9\" (UID: \"6316f735-c6d6-4b80-95be-c548707cd386\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fh5z9"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.892683 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/02ae83a0-06cd-46f5-a5de-3127a57b8d5c-mountpoint-dir\") pod \"csi-hostpathplugin-zj778\" (UID: \"02ae83a0-06cd-46f5-a5de-3127a57b8d5c\") " pod="hostpath-provisioner/csi-hostpathplugin-zj778"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.892798 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/02ae83a0-06cd-46f5-a5de-3127a57b8d5c-mountpoint-dir\") pod \"csi-hostpathplugin-zj778\" (UID: \"02ae83a0-06cd-46f5-a5de-3127a57b8d5c\") " pod="hostpath-provisioner/csi-hostpathplugin-zj778"
Nov 28 10:00:03 crc kubenswrapper[4838]: E1128 10:00:03.892856 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:04.392842893 +0000 UTC m=+176.091817063 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.893334 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6316f735-c6d6-4b80-95be-c548707cd386-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-fh5z9\" (UID: \"6316f735-c6d6-4b80-95be-c548707cd386\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fh5z9"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.893802 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/02ae83a0-06cd-46f5-a5de-3127a57b8d5c-csi-data-dir\") pod \"csi-hostpathplugin-zj778\" (UID: \"02ae83a0-06cd-46f5-a5de-3127a57b8d5c\") " pod="hostpath-provisioner/csi-hostpathplugin-zj778"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.893989 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/02ae83a0-06cd-46f5-a5de-3127a57b8d5c-registration-dir\") pod \"csi-hostpathplugin-zj778\" (UID: \"02ae83a0-06cd-46f5-a5de-3127a57b8d5c\") " pod="hostpath-provisioner/csi-hostpathplugin-zj778"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.894134 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/02ae83a0-06cd-46f5-a5de-3127a57b8d5c-plugins-dir\") pod \"csi-hostpathplugin-zj778\" (UID: \"02ae83a0-06cd-46f5-a5de-3127a57b8d5c\") " pod="hostpath-provisioner/csi-hostpathplugin-zj778"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.894254 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/02ae83a0-06cd-46f5-a5de-3127a57b8d5c-socket-dir\") pod \"csi-hostpathplugin-zj778\" (UID: \"02ae83a0-06cd-46f5-a5de-3127a57b8d5c\") " pod="hostpath-provisioner/csi-hostpathplugin-zj778"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.894399 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35b6ed6b-3088-4a12-8bd6-f8def1138e85-config\") pod \"kube-controller-manager-operator-78b949d7b-lng7h\" (UID: \"35b6ed6b-3088-4a12-8bd6-f8def1138e85\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lng7h"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.924004 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/35b6ed6b-3088-4a12-8bd6-f8def1138e85-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-lng7h\" (UID: \"35b6ed6b-3088-4a12-8bd6-f8def1138e85\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lng7h"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.924294 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6316f735-c6d6-4b80-95be-c548707cd386-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-fh5z9\" (UID: \"6316f735-c6d6-4b80-95be-c548707cd386\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fh5z9"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.929842 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-hrtmd"]
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.930086 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7vsn9"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.939187 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-w7htp"]
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.947968 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-tkw4f"]
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.958692 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wpfps"]
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.967168 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-df5f6"]
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.972791 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gp4bv\" (UniqueName: \"kubernetes.io/projected/e3505c3b-461c-489c-8af1-117b3cbc433b-kube-api-access-gp4bv\") pod \"router-default-5444994796-zmc4g\" (UID: \"e3505c3b-461c-489c-8af1-117b3cbc433b\") " pod="openshift-ingress/router-default-5444994796-zmc4g"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.988228 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/502acc2d-a5e3-4240-b2fb-7f67b7518b82-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-vrzwk\" (UID: \"502acc2d-a5e3-4240-b2fb-7f67b7518b82\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vrzwk"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.988242 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/d556750d-84c7-4fdc-8080-33cf240704df-signing-key\") pod \"service-ca-9c57cc56f-k2xhp\" (UID: \"d556750d-84c7-4fdc-8080-33cf240704df\") " pod="openshift-service-ca/service-ca-9c57cc56f-k2xhp"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.988404 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/39d8a22f-d5ef-427b-aae0-0e8faad08f6e-metrics-tls\") pod \"dns-default-9pfms\" (UID: \"39d8a22f-d5ef-427b-aae0-0e8faad08f6e\") " pod="openshift-dns/dns-default-9pfms"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.988518 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/da520ae0-3472-44e1-af12-5e1fcdfec000-images\") pod \"machine-config-operator-74547568cd-pnngr\" (UID: \"da520ae0-3472-44e1-af12-5e1fcdfec000\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pnngr"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.988575 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/da520ae0-3472-44e1-af12-5e1fcdfec000-proxy-tls\") pod \"machine-config-operator-74547568cd-pnngr\" (UID: \"da520ae0-3472-44e1-af12-5e1fcdfec000\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pnngr"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.988588 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/89ca4c63-f97b-4a71-8b2e-613170fdde6b-secret-volume\") pod \"collect-profiles-29405400-6d4qp\" (UID: \"89ca4c63-f97b-4a71-8b2e-613170fdde6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.988596 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-service-ca\") pod \"console-f9d7485db-lvtzk\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " pod="openshift-console/console-f9d7485db-lvtzk"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.989081 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/da520ae0-3472-44e1-af12-5e1fcdfec000-auth-proxy-config\") pod \"machine-config-operator-74547568cd-pnngr\" (UID: \"da520ae0-3472-44e1-af12-5e1fcdfec000\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pnngr"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.989449 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mbpr7\" (UniqueName: \"kubernetes.io/projected/8a281de3-cc12-4dd1-b9be-0ca03a0613ec-kube-api-access-mbpr7\") pod \"marketplace-operator-79b997595-xsjrl\" (UID: \"8a281de3-cc12-4dd1-b9be-0ca03a0613ec\") " pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.990370 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sf77v\" (UniqueName: \"kubernetes.io/projected/89ca4c63-f97b-4a71-8b2e-613170fdde6b-kube-api-access-sf77v\") pod \"collect-profiles-29405400-6d4qp\" (UID: \"89ca4c63-f97b-4a71-8b2e-613170fdde6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.990473 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/89ca4c63-f97b-4a71-8b2e-613170fdde6b-config-volume\") pod \"collect-profiles-29405400-6d4qp\" (UID: \"89ca4c63-f97b-4a71-8b2e-613170fdde6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.991260 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vb6p\" (UniqueName: \"kubernetes.io/projected/9f72bc48-a623-4365-8dfb-bbf5b0179798-kube-api-access-8vb6p\") pod \"openshift-controller-manager-operator-756b6f6bc6-nm4jv\" (UID: \"9f72bc48-a623-4365-8dfb-bbf5b0179798\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nm4jv"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.993418 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:03 crc kubenswrapper[4838]: E1128 10:00:03.993804 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:04.493791593 +0000 UTC m=+176.192765753 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.994331 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/e3505c3b-461c-489c-8af1-117b3cbc433b-stats-auth\") pod \"router-default-5444994796-zmc4g\" (UID: \"e3505c3b-461c-489c-8af1-117b3cbc433b\") " pod="openshift-ingress/router-default-5444994796-zmc4g"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.994388 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5gkkv\" (UniqueName: \"kubernetes.io/projected/0ba0db9f-c351-49d0-a9c2-87b0edcfccff-kube-api-access-5gkkv\") pod \"multus-admission-controller-857f4d67dd-kmvqb\" (UID: \"0ba0db9f-c351-49d0-a9c2-87b0edcfccff\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-kmvqb"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.994448 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2csw\" (UniqueName: \"kubernetes.io/projected/da520ae0-3472-44e1-af12-5e1fcdfec000-kube-api-access-r2csw\") pod \"machine-config-operator-74547568cd-pnngr\" (UID: \"da520ae0-3472-44e1-af12-5e1fcdfec000\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pnngr"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.994895 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/152c1627-347b-483f-aa32-a65e67de0e5e-webhook-cert\") pod \"packageserver-d55dfcdfc-7l5mk\" (UID: \"152c1627-347b-483f-aa32-a65e67de0e5e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk"
Nov 28 10:00:03 crc kubenswrapper[4838]: W1128 10:00:03.995069 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb0f7dc9_74c6_4031_8edb_7b10c219df34.slice/crio-81b6d5cc50a25ee8d58570a25a4f774a514d1d9344de6ab63ec377df15a1de2b WatchSource:0}: Error finding container 81b6d5cc50a25ee8d58570a25a4f774a514d1d9344de6ab63ec377df15a1de2b: Status 404 returned error can't find the container with id 81b6d5cc50a25ee8d58570a25a4f774a514d1d9344de6ab63ec377df15a1de2b
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.995778 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bae52de6-77be-47d7-a5fe-8e22e5f24bf4-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-9mrm4\" (UID: \"bae52de6-77be-47d7-a5fe-8e22e5f24bf4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9mrm4"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.996390 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8p4m\" (UniqueName: \"kubernetes.io/projected/4752b167-aa81-4844-98ab-07cc66b152bd-kube-api-access-z8p4m\") pod \"catalog-operator-68c6474976-ss9lp\" (UID: \"4752b167-aa81-4844-98ab-07cc66b152bd\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ss9lp"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.996809 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e3505c3b-461c-489c-8af1-117b3cbc433b-metrics-certs\") pod \"router-default-5444994796-zmc4g\" (UID: \"e3505c3b-461c-489c-8af1-117b3cbc433b\") " pod="openshift-ingress/router-default-5444994796-zmc4g"
Nov 28 10:00:03 crc kubenswrapper[4838]: I1128 10:00:03.996944 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/e3505c3b-461c-489c-8af1-117b3cbc433b-default-certificate\") pod \"router-default-5444994796-zmc4g\" (UID: \"e3505c3b-461c-489c-8af1-117b3cbc433b\") " pod="openshift-ingress/router-default-5444994796-zmc4g"
Nov 28 10:00:04 crc kubenswrapper[4838]: W1128 10:00:04.003137 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod910afa09_7086_4695_bd2f_9397ad54ad4f.slice/crio-2319715b6efae21fe6059092a6280acac031f88b50a8fb976904988d367f1b20 WatchSource:0}: Error finding container 2319715b6efae21fe6059092a6280acac031f88b50a8fb976904988d367f1b20: Status 404 returned error can't find the container with id 2319715b6efae21fe6059092a6280acac031f88b50a8fb976904988d367f1b20
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.007282 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.010627 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpnzx\" (UniqueName: \"kubernetes.io/projected/f16a2d31-72dd-4ec0-b516-a3d10b0b9aed-kube-api-access-kpnzx\") pod \"package-server-manager-789f6589d5-zn7tf\" (UID: \"f16a2d31-72dd-4ec0-b516-a3d10b0b9aed\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-zn7tf"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.040413 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t4qwl\" (UniqueName: \"kubernetes.io/projected/39d8a22f-d5ef-427b-aae0-0e8faad08f6e-kube-api-access-t4qwl\") pod \"dns-default-9pfms\" (UID: \"39d8a22f-d5ef-427b-aae0-0e8faad08f6e\") " pod="openshift-dns/dns-default-9pfms"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.053629 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpldx\" (UniqueName: \"kubernetes.io/projected/e81eda87-1848-48b9-8a4d-6c1c184cb421-kube-api-access-wpldx\") pod \"machine-config-controller-84d6567774-x4rrn\" (UID: \"e81eda87-1848-48b9-8a4d-6c1c184cb421\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x4rrn"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.057058 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-9pfms"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.068613 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7fff\" (UniqueName: \"kubernetes.io/projected/00e915fe-78b7-4c8f-bc2b-1357c1beeee3-kube-api-access-g7fff\") pod \"service-ca-operator-777779d784-qlvp5\" (UID: \"00e915fe-78b7-4c8f-bc2b-1357c1beeee3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qlvp5"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.094220 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:04 crc kubenswrapper[4838]: E1128 10:00:04.094960 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:04.594932512 +0000 UTC m=+176.293906722 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.106977 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cbcfj\" (UniqueName: \"kubernetes.io/projected/a8af0096-480e-4c84-8da1-e6ff489addd3-kube-api-access-cbcfj\") pod \"machine-config-server-8q49c\" (UID: \"a8af0096-480e-4c84-8da1-e6ff489addd3\") " pod="openshift-machine-config-operator/machine-config-server-8q49c"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.125660 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfc7g\" (UniqueName: \"kubernetes.io/projected/502acc2d-a5e3-4240-b2fb-7f67b7518b82-kube-api-access-tfc7g\") pod \"control-plane-machine-set-operator-78cbb6b69f-vrzwk\" (UID: \"502acc2d-a5e3-4240-b2fb-7f67b7518b82\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vrzwk"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.135256 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkpgr\" (UniqueName: \"kubernetes.io/projected/152c1627-347b-483f-aa32-a65e67de0e5e-kube-api-access-qkpgr\") pod \"packageserver-d55dfcdfc-7l5mk\" (UID: \"152c1627-347b-483f-aa32-a65e67de0e5e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.143395 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nm4jv"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.171454 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msngm\" (UniqueName: \"kubernetes.io/projected/d556750d-84c7-4fdc-8080-33cf240704df-kube-api-access-msngm\") pod \"service-ca-9c57cc56f-k2xhp\" (UID: \"d556750d-84c7-4fdc-8080-33cf240704df\") " pod="openshift-service-ca/service-ca-9c57cc56f-k2xhp"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.195891 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:04 crc kubenswrapper[4838]: E1128 10:00:04.196170 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:04.696158313 +0000 UTC m=+176.395132483 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.200087 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-lvtzk"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.200923 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjhff\" (UniqueName: \"kubernetes.io/projected/16dead36-b11f-4dec-ad63-a25840480761-kube-api-access-fjhff\") pod \"ingress-canary-f4z4k\" (UID: \"16dead36-b11f-4dec-ad63-a25840480761\") " pod="openshift-ingress-canary/ingress-canary-f4z4k"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.214008 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zg84s\" (UniqueName: \"kubernetes.io/projected/02ae83a0-06cd-46f5-a5de-3127a57b8d5c-kube-api-access-zg84s\") pod \"csi-hostpathplugin-zj778\" (UID: \"02ae83a0-06cd-46f5-a5de-3127a57b8d5c\") " pod="hostpath-provisioner/csi-hostpathplugin-zj778"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.215255 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9mrm4"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.240029 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x4rrn"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.240267 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzjvn\" (UniqueName: \"kubernetes.io/projected/9d2e0aff-7ecb-4126-add3-57c7a24e781c-kube-api-access-hzjvn\") pod \"migrator-59844c95c7-5xzrq\" (UID: \"9d2e0aff-7ecb-4126-add3-57c7a24e781c\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5xzrq"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.245142 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-zn7tf"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.255099 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vrzwk"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.257419 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/35b6ed6b-3088-4a12-8bd6-f8def1138e85-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-lng7h\" (UID: \"35b6ed6b-3088-4a12-8bd6-f8def1138e85\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lng7h"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.261071 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ss9lp"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.268477 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pnngr"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.273153 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-kmvqb"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.275221 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjlkq\" (UniqueName: \"kubernetes.io/projected/6316f735-c6d6-4b80-95be-c548707cd386-kube-api-access-gjlkq\") pod \"kube-storage-version-migrator-operator-b67b599dd-fh5z9\" (UID: \"6316f735-c6d6-4b80-95be-c548707cd386\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fh5z9"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.282219 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-zmc4g"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.293117 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.293499 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lng7h"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.296804 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:04 crc kubenswrapper[4838]: E1128 10:00:04.296992 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:04.796968937 +0000 UTC m=+176.495943107 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.297019 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:04 crc kubenswrapper[4838]: E1128 10:00:04.297366 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:04.797353463 +0000 UTC m=+176.496327633 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.300800 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-k2xhp"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.313457 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qlvp5"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.319508 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-f4z4k"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.328012 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.350132 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-zj778"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.362313 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-8q49c"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.397798 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:04 crc kubenswrapper[4838]: E1128 10:00:04.398135 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:04.898120545 +0000 UTC m=+176.597094715 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.499673 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:04 crc kubenswrapper[4838]: E1128 10:00:04.500113 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:05.000098107 +0000 UTC m=+176.699072277 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.507272 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fh5z9"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.522020 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-9pfms"]
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.531142 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5xzrq"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.600535 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:04 crc kubenswrapper[4838]: E1128 10:00:04.600897 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:05.100884 +0000 UTC m=+176.799858170 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.602000 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7vsn9"]
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.602029 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lsj7g"]
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.602040 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nm4jv"]
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.622870 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-2w9k9" podStartSLOduration=135.622857843 podStartE2EDuration="2m15.622857843s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:04.622122353 +0000 UTC m=+176.321096523" watchObservedRunningTime="2025-11-28 10:00:04.622857843 +0000 UTC m=+176.321832013"
Nov 28 10:00:04 crc kubenswrapper[4838]: W1128 10:00:04.641254 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod39d8a22f_d5ef_427b_aae0_0e8faad08f6e.slice/crio-8044e14ade7da6050315069d64422de2f381a6ef176cb284ea3ea0b3df576f58 WatchSource:0}: Error finding container 8044e14ade7da6050315069d64422de2f381a6ef176cb284ea3ea0b3df576f58: Status 404 returned error can't find the container with id 8044e14ade7da6050315069d64422de2f381a6ef176cb284ea3ea0b3df576f58
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.686650 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-bs85m" event={"ID":"fb0f7dc9-74c6-4031-8edb-7b10c219df34","Type":"ContainerStarted","Data":"81b6d5cc50a25ee8d58570a25a4f774a514d1d9344de6ab63ec377df15a1de2b"}
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.693878 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w7htp" event={"ID":"4301958d-845f-449d-80a5-b81cd858368d","Type":"ContainerStarted","Data":"a3b60362ee85a58de2ea39e7fda111b1beee7005a2a6c2a9e1ca5005e258351c"}
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.703602 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:04 crc kubenswrapper[4838]: E1128 10:00:04.704107 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:05.204073342 +0000 UTC m=+176.903047512 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.705952 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lsj7g" event={"ID":"d97b2299-c955-4314-8b0a-a952e7bb53da","Type":"ContainerStarted","Data":"18584b1156dfe91854a7a4bee0f4b340fa38b841f31cdd4c644aa201375632b6"}
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.712154 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-zmc4g" event={"ID":"e3505c3b-461c-489c-8af1-117b3cbc433b","Type":"ContainerStarted","Data":"02ca3bba378cee1db37ccbd809dc1b8c09c4821e82d90ae5c2004a18abff796a"}
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.715517 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" event={"ID":"8d6cc687-8b13-44b1-a15b-488c17e8b50c","Type":"ContainerStarted","Data":"5f3ece3652b669403065c2b4a84281cd2e781b0954be5e4d96273be56c182770"}
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.716741 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-bs82t"]
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.722834 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-vn4kw" event={"ID":"488c1f95-482d-4a08-b83d-81b3f08090ab","Type":"ContainerStarted","Data":"b830811a615933cdad687ec9b67bbd471f0d640adfc4e3052c32417d0758b4ee"}
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.729641 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-xsjrl"]
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.734203 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" event={"ID":"910afa09-7086-4695-bd2f-9397ad54ad4f","Type":"ContainerStarted","Data":"2319715b6efae21fe6059092a6280acac031f88b50a8fb976904988d367f1b20"}
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.741444 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-8q49c" event={"ID":"a8af0096-480e-4c84-8da1-e6ff489addd3","Type":"ContainerStarted","Data":"f21a6e96a0e0cd02597b683eef3c94e44337d0a74cba699041ad2419166631c4"}
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.752217 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7vsn9" event={"ID":"bfd87dd0-c7b3-4e3c-940c-0229718e3579","Type":"ContainerStarted","Data":"d69c24df1a54fb95176d65ae81a6012ead8c5d9cd82cc6d069d18118b17b9081"}
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.763173 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq" event={"ID":"74b81264-e855-4198-a063-9ef62eb9ad30","Type":"ContainerStarted","Data":"38a0d2caffe23df330b235ca815748b61341db940d0bc79b5880a5f4eca3d031"}
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.769922 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-tkw4f" event={"ID":"43982f1d-a55d-4870-be0a-c08c63a8e841","Type":"ContainerStarted","Data":"3f3b74bc1d0587c5a11e29c6102e0210f85387908616aba3c66c7ce66e68f264"}
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.770764 4838 patch_prober.go:28] interesting pod/downloads-7954f5f757-2w9k9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused" start-of-body=
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.770822 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2w9k9" podUID="ebabbe26-3c09-4a23-8dcc-aed864a3e4a4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused"
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.807894 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:04 crc kubenswrapper[4838]: E1128 10:00:04.808589 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:05.308555035 +0000 UTC m=+177.007529205 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.921160 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:04 crc kubenswrapper[4838]: E1128 10:00:04.922066 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:05.422048785 +0000 UTC m=+177.121022955 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.969283 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-x4rrn"]
Nov 28 10:00:04 crc kubenswrapper[4838]: I1128 10:00:04.997012 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-lvtzk"]
Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.024042 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:05 crc kubenswrapper[4838]: E1128 10:00:05.024407 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:05.524393851 +0000 UTC m=+177.223368021 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.124615 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-kmvqb"]
Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.125391 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:05 crc kubenswrapper[4838]: E1128 10:00:05.125686 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:05.625676664 +0000 UTC m=+177.324650834 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.159882 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-pnngr"]
Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.164504 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-zn7tf"]
Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.166285 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9mrm4"]
Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.226149 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:05 crc kubenswrapper[4838]: E1128 10:00:05.226529 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:05.72650993 +0000 UTC m=+177.425484100 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.295961 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp"]
Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.301797 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vrzwk"]
Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.301837 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk"]
Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.303298 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ss9lp"]
Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.328809 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:05 crc kubenswrapper[4838]: E1128 10:00:05.329282 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:05.829262083 +0000 UTC m=+177.528236323 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.430430 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:05 crc kubenswrapper[4838]: E1128 10:00:05.430573 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:05.930549277 +0000 UTC m=+177.629523447 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.431053 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:05 crc kubenswrapper[4838]: E1128 10:00:05.431404 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:05.931393132 +0000 UTC m=+177.630367382 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.456786 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-k2xhp"] Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.462090 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fh5z9"] Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.466402 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-qlvp5"] Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.472846 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-f4z4k"] Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.474625 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-5xzrq"] Nov 28 10:00:05 crc kubenswrapper[4838]: W1128 10:00:05.480108 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6316f735_c6d6_4b80_95be_c548707cd386.slice/crio-2f9bb95fe83c7e391d65d6ed12356434ec4652af1b6682176fbed74db2e4c1dc WatchSource:0}: Error finding container 2f9bb95fe83c7e391d65d6ed12356434ec4652af1b6682176fbed74db2e4c1dc: Status 404 returned error can't find the container with id 2f9bb95fe83c7e391d65d6ed12356434ec4652af1b6682176fbed74db2e4c1dc Nov 28 10:00:05 crc kubenswrapper[4838]: W1128 10:00:05.489878 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod00e915fe_78b7_4c8f_bc2b_1357c1beeee3.slice/crio-98717b7edba983d7bc511a73a23f6e2cca6b52c533d4ddb1028ae998c6b6031c WatchSource:0}: Error finding 
container 98717b7edba983d7bc511a73a23f6e2cca6b52c533d4ddb1028ae998c6b6031c: Status 404 returned error can't find the container with id 98717b7edba983d7bc511a73a23f6e2cca6b52c533d4ddb1028ae998c6b6031c Nov 28 10:00:05 crc kubenswrapper[4838]: W1128 10:00:05.492106 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd556750d_84c7_4fdc_8080_33cf240704df.slice/crio-4b20574137c797513b7c68d724debb74a7b9311ef6cb09523af4a3c7ba0aa1a4 WatchSource:0}: Error finding container 4b20574137c797513b7c68d724debb74a7b9311ef6cb09523af4a3c7ba0aa1a4: Status 404 returned error can't find the container with id 4b20574137c797513b7c68d724debb74a7b9311ef6cb09523af4a3c7ba0aa1a4 Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.495570 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-zj778"] Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.501544 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lng7h"] Nov 28 10:00:05 crc kubenswrapper[4838]: W1128 10:00:05.521689 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod02ae83a0_06cd_46f5_a5de_3127a57b8d5c.slice/crio-1f4d9fb170d2be5cda222396afc40c2ab124a86d72347c8da2cc0effa8d58de5 WatchSource:0}: Error finding container 1f4d9fb170d2be5cda222396afc40c2ab124a86d72347c8da2cc0effa8d58de5: Status 404 returned error can't find the container with id 1f4d9fb170d2be5cda222396afc40c2ab124a86d72347c8da2cc0effa8d58de5 Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.534950 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:05 crc kubenswrapper[4838]: E1128 10:00:05.535039 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:06.035024631 +0000 UTC m=+177.733998801 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.535229 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:05 crc kubenswrapper[4838]: E1128 10:00:05.535513 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:06.0355054 +0000 UTC m=+177.734479570 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.636230 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:05 crc kubenswrapper[4838]: E1128 10:00:05.636666 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:06.136651518 +0000 UTC m=+177.835625688 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.737845 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:05 crc kubenswrapper[4838]: E1128 10:00:05.738345 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:06.238333938 +0000 UTC m=+177.937308108 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.776143 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4zn6" event={"ID":"de1e3837-d0de-4dcf-9c04-621f91ab3f52","Type":"ContainerStarted","Data":"19df9f5eff4398fa6d124e666de8bb92c863408e970bb0cd131982bacf582c0d"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.776872 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x4rrn" event={"ID":"e81eda87-1848-48b9-8a4d-6c1c184cb421","Type":"ContainerStarted","Data":"93430e6471d9b58677ca7ff8eefbaf3ff13761e62c0c228537d2cb96afb00ed4"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.783209 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-zj778" event={"ID":"02ae83a0-06cd-46f5-a5de-3127a57b8d5c","Type":"ContainerStarted","Data":"1f4d9fb170d2be5cda222396afc40c2ab124a86d72347c8da2cc0effa8d58de5"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.788090 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp" event={"ID":"89ca4c63-f97b-4a71-8b2e-613170fdde6b","Type":"ContainerStarted","Data":"230976085bb66f3c5c6b9f39803c7d36885415f9627fb9d5e8734e8fe4b2a0f1"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.790521 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nm4jv" event={"ID":"9f72bc48-a623-4365-8dfb-bbf5b0179798","Type":"ContainerStarted","Data":"b72d7a145e487b6a5673b11ae7b5d7feb6beefd117d242c90d5a17bbd5c16786"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.809815 4838 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wpfps" event={"ID":"01a7faa9-fda8-4f56-b472-e9165b66dab9","Type":"ContainerStarted","Data":"823ac1bde9c327c4521717d131bb21ded9ea987b396d831235c72d7ef8ca0618"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.812134 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w7htp" event={"ID":"4301958d-845f-449d-80a5-b81cd858368d","Type":"ContainerStarted","Data":"857614da2d59bfeae4c33a35e5033bfa60c8b68bdbe053f003c5753bd4b9f10c"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.815206 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl" event={"ID":"8a281de3-cc12-4dd1-b9be-0ca03a0613ec","Type":"ContainerStarted","Data":"e751d1b872dd99bf093972c4be19f6624fdafa3eddd0cc7b4bb4b247dcad03fe"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.817024 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-kmvqb" event={"ID":"0ba0db9f-c351-49d0-a9c2-87b0edcfccff","Type":"ContainerStarted","Data":"4595a3652836c08052a1cd508acf121b840ce410b611580995da72766a4817a5"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.838683 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.838761 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-25qvk" event={"ID":"4831ba67-ae8f-40fc-9d30-1c4ce8e11f8a","Type":"ContainerStarted","Data":"ee5fdbc1754122d8fd5d1246cc837826b19e8fc77309caf2b6828e5e5e252194"} Nov 28 10:00:05 crc kubenswrapper[4838]: E1128 10:00:05.838905 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:06.338884622 +0000 UTC m=+178.037858792 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.839034 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:05 crc kubenswrapper[4838]: E1128 10:00:05.839323 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:06.339309449 +0000 UTC m=+178.038283629 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.840300 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-f4z4k" event={"ID":"16dead36-b11f-4dec-ad63-a25840480761","Type":"ContainerStarted","Data":"4d39e1650d140133efe478674236f3683f30ff4c06787f49a8e3690f0370c038"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.853459 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" event={"ID":"cc483880-bf40-4f4c-bf77-52eb4896bd5b","Type":"ContainerStarted","Data":"68fad22b01142c00a010c036fae40d56e97966c40e06f59ff5f3ac350944c7a6"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.863550 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9mrm4" event={"ID":"bae52de6-77be-47d7-a5fe-8e22e5f24bf4","Type":"ContainerStarted","Data":"abf6d4f7928723cfd7896ce1a9d10337376284fa6e28ec6d23bed6ecc6c2386a"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.875397 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-9pfms" event={"ID":"39d8a22f-d5ef-427b-aae0-0e8faad08f6e","Type":"ContainerStarted","Data":"8044e14ade7da6050315069d64422de2f381a6ef176cb284ea3ea0b3df576f58"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.887081 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5xzrq" event={"ID":"9d2e0aff-7ecb-4126-add3-57c7a24e781c","Type":"ContainerStarted","Data":"120b0d4d4c4a83f0f306ded99584ab4325e0c4533b0cb41964275acbf44e46c9"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.911493 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pnngr" event={"ID":"da520ae0-3472-44e1-af12-5e1fcdfec000","Type":"ContainerStarted","Data":"5b3f27e1d3888dadbb726a3f4a19f31f02ea02172d032dac84bd12dc4cb2605c"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.927337 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lng7h" event={"ID":"35b6ed6b-3088-4a12-8bd6-f8def1138e85","Type":"ContainerStarted","Data":"1d2f47ae43f2a783ad65d96ecc173b52c06f7a8b007f28732cc497fe56fc7634"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.929798 4838 generic.go:334] "Generic (PLEG): container finished" podID="69a82003-94b3-4aaa-9904-8485cfa5f662" containerID="d2d4ae00ad6d0c16c04903f145a2b19d6069273594f4d7496c1c8f51bfd40fac" exitCode=0 Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.929873 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" event={"ID":"69a82003-94b3-4aaa-9904-8485cfa5f662","Type":"ContainerDied","Data":"d2d4ae00ad6d0c16c04903f145a2b19d6069273594f4d7496c1c8f51bfd40fac"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.934092 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ss9lp" event={"ID":"4752b167-aa81-4844-98ab-07cc66b152bd","Type":"ContainerStarted","Data":"1652ea0bf5048cc3d4a6ef119fdc284b8beffd40313f01267fd94335bf810338"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.937381 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-465nq" event={"ID":"6524e1a4-6ca0-4e18-8be8-f7cd56757453","Type":"ContainerStarted","Data":"723730b66164245cccc8f13f6c6a5e88c764cba0f21423f7ac9ab4403cd79f80"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.939449 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:05 crc kubenswrapper[4838]: E1128 10:00:05.939729 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:06.439699987 +0000 UTC m=+178.138674157 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.941498 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qlvp5" event={"ID":"00e915fe-78b7-4c8f-bc2b-1357c1beeee3","Type":"ContainerStarted","Data":"98717b7edba983d7bc511a73a23f6e2cca6b52c533d4ddb1028ae998c6b6031c"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.943784 4838 generic.go:334] "Generic (PLEG): container finished" podID="fdac71d5-8b14-4c59-9d37-345456b26b36" containerID="da2b8bccf72a5c489832f576bedea88feaf00dfdf90bbaf0abcb45468a6dbb99" exitCode=0 Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.943818 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-z695f" event={"ID":"fdac71d5-8b14-4c59-9d37-345456b26b36","Type":"ContainerDied","Data":"da2b8bccf72a5c489832f576bedea88feaf00dfdf90bbaf0abcb45468a6dbb99"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.960499 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-lvtzk" event={"ID":"9fb065c7-1402-4294-a8f6-f1aa662ecbb0","Type":"ContainerStarted","Data":"33bc225c6b26822596184d2d688100df66a5d4009c39bb68b8560cf61229a6af"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.961908 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-zn7tf" event={"ID":"f16a2d31-72dd-4ec0-b516-a3d10b0b9aed","Type":"ContainerStarted","Data":"9fdb59a69d93a761f686e4ad60f238c461f0638b108e0c97072e83822188dbb1"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.963228 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-bs82t" event={"ID":"d91a5fa7-8c8e-455a-a237-e0ec0baa2197","Type":"ContainerStarted","Data":"1615811e899723fe5634a392a5a175aab29ceefec18c611546b3dae579a2931f"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.964539 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fh5z9" event={"ID":"6316f735-c6d6-4b80-95be-c548707cd386","Type":"ContainerStarted","Data":"2f9bb95fe83c7e391d65d6ed12356434ec4652af1b6682176fbed74db2e4c1dc"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.965924 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-qfpq5" event={"ID":"8c869fc5-3a3b-41e2-8eac-4dc5835be740","Type":"ContainerStarted","Data":"33b1580a5214b3edf427bbf694020ff0a1546046f78fd654e292b4face4ec619"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.968212 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-k2xhp" event={"ID":"d556750d-84c7-4fdc-8080-33cf240704df","Type":"ContainerStarted","Data":"4b20574137c797513b7c68d724debb74a7b9311ef6cb09523af4a3c7ba0aa1a4"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.970507 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk" event={"ID":"152c1627-347b-483f-aa32-a65e67de0e5e","Type":"ContainerStarted","Data":"20ae2f57fbfca061e8b8f65b7017a456640cac8d7d7899009d2aed03be78b40b"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.971786 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vrzwk" event={"ID":"502acc2d-a5e3-4240-b2fb-7f67b7518b82","Type":"ContainerStarted","Data":"7ba0667f6555b9bf3d9751a4b46757efe2e66f90a85264e8920f12bf20f05f2b"} Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.972070 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-vn4kw" Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.972336 4838 patch_prober.go:28] interesting pod/downloads-7954f5f757-2w9k9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused" start-of-body= Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.972373 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2w9k9" podUID="ebabbe26-3c09-4a23-8dcc-aed864a3e4a4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused" Nov 28 10:00:05 crc kubenswrapper[4838]: I1128 10:00:05.989331 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-vn4kw" podStartSLOduration=136.989309282 podStartE2EDuration="2m16.989309282s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:05.988505388 +0000 UTC m=+177.687479558" watchObservedRunningTime="2025-11-28 10:00:05.989309282 +0000 UTC m=+177.688283452" Nov 28 10:00:06 crc kubenswrapper[4838]: I1128 10:00:06.040540 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:06 crc kubenswrapper[4838]: E1128 10:00:06.061833 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:06.561817227 +0000 UTC m=+178.260791397 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:06 crc kubenswrapper[4838]: I1128 10:00:06.141415 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:06 crc kubenswrapper[4838]: E1128 10:00:06.141652 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:06.641629788 +0000 UTC m=+178.340603958 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:06 crc kubenswrapper[4838]: I1128 10:00:06.141708 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:06 crc kubenswrapper[4838]: E1128 10:00:06.142116 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:06.642101827 +0000 UTC m=+178.341075997 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:06 crc kubenswrapper[4838]: I1128 10:00:06.242514 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:06 crc kubenswrapper[4838]: E1128 10:00:06.242899 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:06.742751065 +0000 UTC m=+178.441725235 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:06 crc kubenswrapper[4838]: I1128 10:00:06.243164 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:06 crc kubenswrapper[4838]: E1128 10:00:06.243464 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:06.743452683 +0000 UTC m=+178.442426853 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:06 crc kubenswrapper[4838]: I1128 10:00:06.344569 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:06 crc kubenswrapper[4838]: E1128 10:00:06.345621 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:06.845600882 +0000 UTC m=+178.544575052 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:06 crc kubenswrapper[4838]: I1128 10:00:06.447602 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:06 crc kubenswrapper[4838]: E1128 10:00:06.448123 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:06.948100276 +0000 UTC m=+178.647074476 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:06 crc kubenswrapper[4838]: I1128 10:00:06.549328 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:06 crc kubenswrapper[4838]: E1128 10:00:06.549829 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:07.049802806 +0000 UTC m=+178.748777016 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:06 crc kubenswrapper[4838]: I1128 10:00:06.651621 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:06 crc kubenswrapper[4838]: E1128 10:00:06.652127 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:07.152101691 +0000 UTC m=+178.851075901 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:06 crc kubenswrapper[4838]: I1128 10:00:06.753479 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:06 crc kubenswrapper[4838]: E1128 10:00:06.754037 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:07.25401234 +0000 UTC m=+178.952986540 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:06 crc kubenswrapper[4838]: I1128 10:00:06.855983 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:06 crc kubenswrapper[4838]: E1128 10:00:06.856523 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:07.356502502 +0000 UTC m=+179.055476712 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:06 crc kubenswrapper[4838]: I1128 10:00:06.957983 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:06 crc kubenswrapper[4838]: E1128 10:00:06.958226 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:07.458186563 +0000 UTC m=+179.157160773 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:06 crc kubenswrapper[4838]: I1128 10:00:06.958470 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:06 crc kubenswrapper[4838]: E1128 10:00:06.959410 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:07.459384461 +0000 UTC m=+179.158358671 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:06 crc kubenswrapper[4838]: I1128 10:00:06.972118 4838 patch_prober.go:28] interesting pod/console-operator-58897d9998-vn4kw container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 10:00:06 crc kubenswrapper[4838]: I1128 10:00:06.972209 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-vn4kw" podUID="488c1f95-482d-4a08-b83d-81b3f08090ab" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.7:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 10:00:06 crc kubenswrapper[4838]: I1128 10:00:06.980083 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" event={"ID":"910afa09-7086-4695-bd2f-9397ad54ad4f","Type":"ContainerStarted","Data":"504fec1a4046d1107b0fffb84341cc1c2d3e16fa7244bc4c0f64010416aa7639"} Nov 28 10:00:07 crc kubenswrapper[4838]: I1128 10:00:07.060077 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:07 crc kubenswrapper[4838]: E1128 10:00:07.061402 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:07.561362973 +0000 UTC m=+179.260337193 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:07 crc kubenswrapper[4838]: I1128 10:00:07.092365 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-vn4kw" Nov 28 10:00:07 crc kubenswrapper[4838]: I1128 10:00:07.166306 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:07 crc kubenswrapper[4838]: E1128 10:00:07.166975 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:07.666962632 +0000 UTC m=+179.365936802 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:07 crc kubenswrapper[4838]: I1128 10:00:07.267122 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:07 crc kubenswrapper[4838]: E1128 10:00:07.267510 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:07.767494695 +0000 UTC m=+179.466468865 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:07 crc kubenswrapper[4838]: I1128 10:00:07.369670 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:07 crc kubenswrapper[4838]: E1128 10:00:07.371353 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:07.871341213 +0000 UTC m=+179.570315383 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:07 crc kubenswrapper[4838]: I1128 10:00:07.473504 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:07 crc kubenswrapper[4838]: E1128 10:00:07.473895 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:07.973878798 +0000 UTC m=+179.672852968 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:07 crc kubenswrapper[4838]: I1128 10:00:07.575137 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:07 crc kubenswrapper[4838]: E1128 10:00:07.575897 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:08.07588166 +0000 UTC m=+179.774855830 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:07 crc kubenswrapper[4838]: I1128 10:00:07.683926 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:07 crc kubenswrapper[4838]: E1128 10:00:07.684380 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:08.184364307 +0000 UTC m=+179.883338467 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:07 crc kubenswrapper[4838]: I1128 10:00:07.790551 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:07 crc kubenswrapper[4838]: E1128 10:00:07.790883 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:08.290871192 +0000 UTC m=+179.989845362 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:07 crc kubenswrapper[4838]: I1128 10:00:07.892045 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:07 crc kubenswrapper[4838]: E1128 10:00:07.892487 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:08.392466298 +0000 UTC m=+180.091440468 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:07 crc kubenswrapper[4838]: I1128 10:00:07.993099 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:07 crc kubenswrapper[4838]: E1128 10:00:07.993657 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:08.493642678 +0000 UTC m=+180.192616838 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.026757 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-zmc4g" event={"ID":"e3505c3b-461c-489c-8af1-117b3cbc433b","Type":"ContainerStarted","Data":"2b656b1aaa4fb6640b7f7f652540bc8e1cbceda228682b38b8b607af3f127d03"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.033248 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vrzwk" event={"ID":"502acc2d-a5e3-4240-b2fb-7f67b7518b82","Type":"ContainerStarted","Data":"9b2a3bc55e3dceb197586b9ff14023edaa5d06b96ebdbdb434a9c851e6a7c878"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.040472 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-lvtzk" event={"ID":"9fb065c7-1402-4294-a8f6-f1aa662ecbb0","Type":"ContainerStarted","Data":"9987b620f50ad69f71453bb0a5dcde6871fb95839426a7dff767e8460a8c56e9"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.063351 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lsj7g" event={"ID":"d97b2299-c955-4314-8b0a-a952e7bb53da","Type":"ContainerStarted","Data":"d531e69909f5041f57d389d91c4ffba2123a3d15a74ac5d5c9db02da8154dd9e"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.069530 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-zmc4g" podStartSLOduration=139.069516649 podStartE2EDuration="2m19.069516649s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:08.068997248 +0000 UTC m=+179.767971418" watchObservedRunningTime="2025-11-28 10:00:08.069516649 +0000 UTC m=+179.768490819"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.098225 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:08 crc kubenswrapper[4838]: E1128 10:00:08.100398 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:08.600379453 +0000 UTC m=+180.299353623 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.100765 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lsj7g" podStartSLOduration=139.100750448 podStartE2EDuration="2m19.100750448s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:08.100124432 +0000 UTC m=+179.799098602" watchObservedRunningTime="2025-11-28 10:00:08.100750448 +0000 UTC m=+179.799724618"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.107195 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w7htp" event={"ID":"4301958d-845f-449d-80a5-b81cd858368d","Type":"ContainerStarted","Data":"a8a090d1f335251ee49bb1e8ece0ca9d2fa8fc989d3fc44ccc373b6d45748303"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.136474 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5xzrq" event={"ID":"9d2e0aff-7ecb-4126-add3-57c7a24e781c","Type":"ContainerStarted","Data":"ba381b15b76f3401ba0689c77b36c545b860d5d8db4687f99a4753173c306b35"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.136756 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5xzrq" event={"ID":"9d2e0aff-7ecb-4126-add3-57c7a24e781c","Type":"ContainerStarted","Data":"b7d20ef2ef1d08301f5580e28a4acc1b5754dc25bc472d788287d95bc75abfab"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.156758 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-kmvqb" event={"ID":"0ba0db9f-c351-49d0-a9c2-87b0edcfccff","Type":"ContainerStarted","Data":"91e5e211b331a21826b05ed6941ebe421dbc54a3e4cdc0bb2fca9a6653768156"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.193936 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vrzwk" podStartSLOduration=139.193914532 podStartE2EDuration="2m19.193914532s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:08.145101399 +0000 UTC m=+179.844075569" watchObservedRunningTime="2025-11-28 10:00:08.193914532 +0000 UTC m=+179.892888702"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.196548 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-bs85m" event={"ID":"fb0f7dc9-74c6-4031-8edb-7b10c219df34","Type":"ContainerStarted","Data":"232da8a69f693d892e0cde1f8479d9e9d3feec8479b8eac605ce534488881234"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.198680 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-bs85m" event={"ID":"fb0f7dc9-74c6-4031-8edb-7b10c219df34","Type":"ContainerStarted","Data":"d53fa3ee00eac622fdf3ee579d2c23bca6ac257298b61a01e453c90bb5503053"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.199194 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-lvtzk" podStartSLOduration=139.199172856 podStartE2EDuration="2m19.199172856s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:08.197278219 +0000 UTC m=+179.896252389" watchObservedRunningTime="2025-11-28 10:00:08.199172856 +0000 UTC m=+179.898147026"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.200912 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:08 crc kubenswrapper[4838]: E1128 10:00:08.206650 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:08.706630048 +0000 UTC m=+180.405604218 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.209008 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp" event={"ID":"89ca4c63-f97b-4a71-8b2e-613170fdde6b","Type":"ContainerStarted","Data":"b99401cb44d9c0221a17a49233459c6b66892d5c9d6f4fa120c97998fd551abe"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.250323 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-f4z4k" event={"ID":"16dead36-b11f-4dec-ad63-a25840480761","Type":"ContainerStarted","Data":"73794dadaef011d5a01f2079119185c3960b6f4af570d89187eb75cebb1f42ba"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.250626 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5xzrq" podStartSLOduration=139.250602304 podStartE2EDuration="2m19.250602304s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:08.23745877 +0000 UTC m=+179.936432940" watchObservedRunningTime="2025-11-28 10:00:08.250602304 +0000 UTC m=+179.949576474"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.277227 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk" event={"ID":"152c1627-347b-483f-aa32-a65e67de0e5e","Type":"ContainerStarted","Data":"8bb5b69deaa24d99d14ba4da2d069de382757171718734a281a116a3b185daca"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.278183 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.284427 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-zmc4g"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.288272 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 10:00:08 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld
Nov 28 10:00:08 crc kubenswrapper[4838]: [+]process-running ok
Nov 28 10:00:08 crc kubenswrapper[4838]: healthz check failed
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.288325 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.294035 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-9pfms" event={"ID":"39d8a22f-d5ef-427b-aae0-0e8faad08f6e","Type":"ContainerStarted","Data":"df85b50b26061fc6288a2d3bd1b69db983d8b82dbabd64be8b783549013e5275"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.294843 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-9pfms"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.303914 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:08 crc kubenswrapper[4838]: E1128 10:00:08.304587 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:08.804573016 +0000 UTC m=+180.503547186 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.326465 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-zn7tf" event={"ID":"f16a2d31-72dd-4ec0-b516-a3d10b0b9aed","Type":"ContainerStarted","Data":"c0db913257af3afcb0adba1cee6f58a6df41a02c9a3ee5375d048afd41d74c93"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.327076 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-zn7tf"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.357755 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-bs85m" podStartSLOduration=139.357741786 podStartE2EDuration="2m19.357741786s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:08.35709516 +0000 UTC m=+180.056069330" watchObservedRunningTime="2025-11-28 10:00:08.357741786 +0000 UTC m=+180.056715956"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.358302 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w7htp" podStartSLOduration=139.358297598 podStartE2EDuration="2m19.358297598s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:08.311943176 +0000 UTC m=+180.010917346" watchObservedRunningTime="2025-11-28 10:00:08.358297598 +0000 UTC m=+180.057271758"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.360271 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" event={"ID":"8d6cc687-8b13-44b1-a15b-488c17e8b50c","Type":"ContainerStarted","Data":"05f94b1d5efe9901e6ee05d54e5dac334982ec559032fb38f6482fccdd938936"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.362455 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-df5f6"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.408452 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:08 crc kubenswrapper[4838]: E1128 10:00:08.411164 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:08.911148965 +0000 UTC m=+180.610123135 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.414646 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-9pfms" podStartSLOduration=8.414631526 podStartE2EDuration="8.414631526s" podCreationTimestamp="2025-11-28 10:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:08.412189517 +0000 UTC m=+180.111163687" watchObservedRunningTime="2025-11-28 10:00:08.414631526 +0000 UTC m=+180.113605696"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.425692 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x4rrn" event={"ID":"e81eda87-1848-48b9-8a4d-6c1c184cb421","Type":"ContainerStarted","Data":"b0404dcc9876e478be34f2f680160916748c8e2cb1724c0e4ecd7bffcf6a60d4"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.446029 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lng7h" event={"ID":"35b6ed6b-3088-4a12-8bd6-f8def1138e85","Type":"ContainerStarted","Data":"678942ff82e7191222f00b8ec03f6797f7733a1b902fe3346254dd2f510501ad"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.478225 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-z695f" event={"ID":"fdac71d5-8b14-4c59-9d37-345456b26b36","Type":"ContainerStarted","Data":"9abd6aafe526b7602ce6fc136206f958f0901df0b3ab7beae59c9b8702c2d724"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.478917 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-z695f"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.492052 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wpfps" event={"ID":"01a7faa9-fda8-4f56-b472-e9165b66dab9","Type":"ContainerStarted","Data":"c362c4f283fcd0231fd91f5bd157e48bfb979da5cddfbecac0560a4226fa688a"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.512135 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:08 crc kubenswrapper[4838]: E1128 10:00:08.512971 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:09.01294894 +0000 UTC m=+180.711923150 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.524010 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ss9lp" event={"ID":"4752b167-aa81-4844-98ab-07cc66b152bd","Type":"ContainerStarted","Data":"36fed049804f8c4b50a0bac3aa5d6c106084e888ade914def2787bf55f5aa93c"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.525299 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ss9lp"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.543844 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-8q49c" event={"ID":"a8af0096-480e-4c84-8da1-e6ff489addd3","Type":"ContainerStarted","Data":"3b73bcfe7971d890b8940f54e146a8a95a768c86bedc1bbc31ebf586817dd263"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.594571 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ss9lp"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.594631 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pnngr" event={"ID":"da520ae0-3472-44e1-af12-5e1fcdfec000","Type":"ContainerStarted","Data":"40ada94d064253dbaf819a1bcaea5c289a789d96151247a1c6fa585a5c7b82c9"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.612144 4838 generic.go:334] "Generic (PLEG): container finished" podID="d91a5fa7-8c8e-455a-a237-e0ec0baa2197" containerID="cdd77ebdb2f3f9c0937bed017102e5c45aac5213d77e14dd33f961e01576b700" exitCode=0
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.612231 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-bs82t" event={"ID":"d91a5fa7-8c8e-455a-a237-e0ec0baa2197","Type":"ContainerDied","Data":"cdd77ebdb2f3f9c0937bed017102e5c45aac5213d77e14dd33f961e01576b700"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.613643 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.613782 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-zn7tf" podStartSLOduration=139.613754354 podStartE2EDuration="2m19.613754354s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:08.613092236 +0000 UTC m=+180.312066406" watchObservedRunningTime="2025-11-28 10:00:08.613754354 +0000 UTC m=+180.312728524"
Nov 28 10:00:08 crc kubenswrapper[4838]: E1128 10:00:08.614426 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:09.11440849 +0000 UTC m=+180.813382751 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.651074 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fh5z9" event={"ID":"6316f735-c6d6-4b80-95be-c548707cd386","Type":"ContainerStarted","Data":"ad452f26588654a7dd3cb27e73ab9680116ff1297cf32ec6a4f9fd35498e9d71"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.678076 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl" event={"ID":"8a281de3-cc12-4dd1-b9be-0ca03a0613ec","Type":"ContainerStarted","Data":"6e26a372f47f409cbd328fe79d285e7b58ca7123f6fff591bcefa8d90affd22a"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.678543 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.684140 4838 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-xsjrl container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body=
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.684191 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl" podUID="8a281de3-cc12-4dd1-b9be-0ca03a0613ec" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.716257 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:08 crc kubenswrapper[4838]: E1128 10:00:08.745300 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:09.245279656 +0000 UTC m=+180.944253826 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.756565 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq" event={"ID":"74b81264-e855-4198-a063-9ef62eb9ad30","Type":"ContainerStarted","Data":"8874d4cd19f3eb967d9b738b3b8b25d11d2b7b8f46e2d9584a33d9dc929bbb21"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.757399 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.762547 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp" podStartSLOduration=8.762520806 podStartE2EDuration="8.762520806s" podCreationTimestamp="2025-11-28 10:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:08.752973068 +0000 UTC m=+180.451947238" watchObservedRunningTime="2025-11-28 10:00:08.762520806 +0000 UTC m=+180.461494976"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.763847 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk" podStartSLOduration=139.763839409 podStartE2EDuration="2m19.763839409s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:08.683943545 +0000 UTC m=+180.382917715" watchObservedRunningTime="2025-11-28 10:00:08.763839409 +0000 UTC m=+180.462813579"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.781104 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-tkw4f" event={"ID":"43982f1d-a55d-4870-be0a-c08c63a8e841","Type":"ContainerStarted","Data":"9b854347df0b43e42ac4d44244e036289459717d8271d2c23f48bd7b49e00d82"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.822032 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:08 crc kubenswrapper[4838]: E1128 10:00:08.823435 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:09.323421219 +0000 UTC m=+181.022395389 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.824854 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-k2xhp" event={"ID":"d556750d-84c7-4fdc-8080-33cf240704df","Type":"ContainerStarted","Data":"3903f3b3c2afb56ecbee307fa039ee4682e85a5f7a247f96187fca36ca31a5bb"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.858318 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-f4z4k" podStartSLOduration=8.858301306 podStartE2EDuration="8.858301306s" podCreationTimestamp="2025-11-28 10:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:08.856076786 +0000 UTC m=+180.555050956" watchObservedRunningTime="2025-11-28 10:00:08.858301306 +0000 UTC m=+180.557275476"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.866866 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qlvp5" event={"ID":"00e915fe-78b7-4c8f-bc2b-1357c1beeee3","Type":"ContainerStarted","Data":"0e94d19d248463fe045a151e8934035f2b72db8d3eb001aec18b906f59ee0e0e"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.900436 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7vsn9" event={"ID":"bfd87dd0-c7b3-4e3c-940c-0229718e3579","Type":"ContainerStarted","Data":"43cab7d20546fc5bbe585b75071cd3d39e18b9d405ee927289275bac49c4ef05"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.901266 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7vsn9"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.908741 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nm4jv" event={"ID":"9f72bc48-a623-4365-8dfb-bbf5b0179798","Type":"ContainerStarted","Data":"1168ff8061cf827e0fcd2735b92d5dc92b456b49556856d502a3927cfa59efc6"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.929165 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:08 crc kubenswrapper[4838]: E1128 10:00:08.930547 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:09.430513609 +0000 UTC m=+181.129487779 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.946006 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7vsn9"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.958838 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9mrm4" event={"ID":"bae52de6-77be-47d7-a5fe-8e22e5f24bf4","Type":"ContainerStarted","Data":"8fc624f7d67c3cda0489dd34379fca300a222358bd038bab7d671039bb16607f"}
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.960371 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-tkw4f" podStartSLOduration=139.960362591 podStartE2EDuration="2m19.960362591s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:08.960113961 +0000 UTC m=+180.659088131" watchObservedRunningTime="2025-11-28 10:00:08.960362591 +0000 UTC m=+180.659336751"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.960746 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.961096 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-z695f" podStartSLOduration=139.961091481 podStartE2EDuration="2m19.961091481s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:08.901555842 +0000 UTC m=+180.600530012" watchObservedRunningTime="2025-11-28 10:00:08.961091481 +0000 UTC m=+180.660065651"
Nov 28 10:00:08 crc kubenswrapper[4838]: I1128 10:00:08.976167 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.032691 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:09 crc kubenswrapper[4838]: E1128 10:00:09.035156 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:09.535144788 +0000 UTC m=+181.234118958 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.066824 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" podStartSLOduration=140.066809424 podStartE2EDuration="2m20.066809424s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:09.065791533 +0000 UTC m=+180.764765703" watchObservedRunningTime="2025-11-28 10:00:09.066809424 +0000 UTC m=+180.765783584"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.099620 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wpfps" podStartSLOduration=140.099606597 podStartE2EDuration="2m20.099606597s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:09.099004582 +0000 UTC m=+180.797978752" watchObservedRunningTime="2025-11-28 10:00:09.099606597 +0000 UTC m=+180.798580767"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.138128 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.138759 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:09 crc kubenswrapper[4838]: E1128 10:00:09.139056 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:09.639042518 +0000 UTC m=+181.338016688 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.140284 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lng7h" podStartSLOduration=140.140274839 podStartE2EDuration="2m20.140274839s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:09.138280428 +0000 UTC m=+180.837254598" watchObservedRunningTime="2025-11-28 10:00:09.140274839 +0000 UTC m=+180.839249009"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.181631 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-8q49c" podStartSLOduration=8.181615927 podStartE2EDuration="8.181615927s" podCreationTimestamp="2025-11-28 10:00:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:09.181160119 +0000 UTC m=+180.880134289" watchObservedRunningTime="2025-11-28 10:00:09.181615927 +0000 UTC m=+180.880590097"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.182748 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl" podStartSLOduration=140.182741523 podStartE2EDuration="2m20.182741523s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:09.162995232 +0000 UTC m=+180.861969402" watchObservedRunningTime="2025-11-28 10:00:09.182741523 +0000 UTC m=+180.881715693"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.239778 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pnngr" podStartSLOduration=140.239764679 podStartE2EDuration="2m20.239764679s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:09.237994737 +0000 UTC m=+180.936968907" watchObservedRunningTime="2025-11-28 10:00:09.239764679 +0000 UTC m=+180.938738849"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.243473 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:09 crc kubenswrapper[4838]: E1128 10:00:09.243842 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:09.743828444 +0000 UTC m=+181.442802614 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.278106 4838 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-7l5mk container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:5443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.278155 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk" podUID="152c1627-347b-483f-aa32-a65e67de0e5e" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.34:5443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.290348 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 10:00:09 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld
Nov 28 10:00:09 crc kubenswrapper[4838]: [+]process-running ok
Nov 28 10:00:09 crc kubenswrapper[4838]: healthz check failed
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.290398 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.304290 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ss9lp" podStartSLOduration=140.304273629 podStartE2EDuration="2m20.304273629s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:09.295260373 +0000 UTC m=+180.994234543" watchObservedRunningTime="2025-11-28 10:00:09.304273629 +0000 UTC m=+181.003247799"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.305364 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-fh5z9" podStartSLOduration=140.305358603 podStartE2EDuration="2m20.305358603s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:09.260877197 +0000 UTC m=+180.959851367" watchObservedRunningTime="2025-11-28 10:00:09.305358603 +0000 UTC m=+181.004332773"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.336547 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-k2xhp" podStartSLOduration=140.33653407 podStartE2EDuration="2m20.33653407s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:09.334270528 +0000 UTC m=+181.033244698" watchObservedRunningTime="2025-11-28 10:00:09.33653407 +0000 UTC m=+181.035508240"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.345196 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:09 crc kubenswrapper[4838]: E1128 10:00:09.345507 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:09.845491673 +0000 UTC m=+181.544465843 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.356921 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq" podStartSLOduration=140.356906487 podStartE2EDuration="2m20.356906487s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:09.355037051 +0000 UTC m=+181.054011221" watchObservedRunningTime="2025-11-28 10:00:09.356906487 +0000 UTC m=+181.055880647"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.364790 4838 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-df5f6 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.17:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.364977 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" podUID="8d6cc687-8b13-44b1-a15b-488c17e8b50c" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.17:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.381892 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x4rrn" podStartSLOduration=140.381877351 podStartE2EDuration="2m20.381877351s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:09.379912011 +0000 UTC m=+181.078886181" watchObservedRunningTime="2025-11-28 10:00:09.381877351 +0000 UTC m=+181.080851521"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.425798 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-25qvk" podStartSLOduration=140.425778884 podStartE2EDuration="2m20.425778884s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:09.423550044 +0000 UTC m=+181.122524214" watchObservedRunningTime="2025-11-28 10:00:09.425778884 +0000 UTC m=+181.124753054"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.446899 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:09 crc kubenswrapper[4838]: E1128 10:00:09.447185 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:09.947174323 +0000 UTC m=+181.646148493 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.472572 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-x4zn6" podStartSLOduration=140.472553174 podStartE2EDuration="2m20.472553174s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:09.471809664 +0000 UTC m=+181.170783824" watchObservedRunningTime="2025-11-28 10:00:09.472553174 +0000 UTC m=+181.171527344"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.521966 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" podStartSLOduration=140.52194813 podStartE2EDuration="2m20.52194813s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:09.487654638 +0000 UTC m=+181.186628808" watchObservedRunningTime="2025-11-28 10:00:09.52194813 +0000 UTC m=+181.220922300"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.531809 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nm4jv" podStartSLOduration=140.531780619 podStartE2EDuration="2m20.531780619s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:09.517292751 +0000 UTC m=+181.216266921" watchObservedRunningTime="2025-11-28 10:00:09.531780619 +0000 UTC m=+181.230754789"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.550183 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9mrm4" podStartSLOduration=140.550164876 podStartE2EDuration="2m20.550164876s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:09.546007867 +0000 UTC m=+181.244982037" watchObservedRunningTime="2025-11-28 10:00:09.550164876 +0000 UTC m=+181.249139046"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.550734 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:09 crc kubenswrapper[4838]: E1128 10:00:09.551017 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:10.05099302 +0000 UTC m=+181.749967190 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.551199 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:09 crc kubenswrapper[4838]: E1128 10:00:09.551517 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:10.05151047 +0000 UTC m=+181.750484640 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.566857 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-7vsn9" podStartSLOduration=140.566839854 podStartE2EDuration="2m20.566839854s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:09.566240129 +0000 UTC m=+181.265214299" watchObservedRunningTime="2025-11-28 10:00:09.566839854 +0000 UTC m=+181.265814024"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.594177 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-465nq" podStartSLOduration=140.594159433 podStartE2EDuration="2m20.594159433s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:09.593506006 +0000 UTC m=+181.292480176" watchObservedRunningTime="2025-11-28 10:00:09.594159433 +0000 UTC m=+181.293133603"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.627700 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qlvp5" podStartSLOduration=140.627686505 podStartE2EDuration="2m20.627686505s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:09.623076698 +0000 UTC m=+181.322050868" watchObservedRunningTime="2025-11-28 10:00:09.627686505 +0000 UTC m=+181.326660675"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.629443 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-hrtmd" podStartSLOduration=140.629437686 podStartE2EDuration="2m20.629437686s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:09.611983607 +0000 UTC m=+181.310957777" watchObservedRunningTime="2025-11-28 10:00:09.629437686 +0000 UTC m=+181.328411856"
Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.654952 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 10:00:09 crc kubenswrapper[4838]: E1128 10:00:09.655236 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed.
No retries permitted until 2025-11-28 10:00:10.155220563 +0000 UTC m=+181.854194733 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.756372 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:09 crc kubenswrapper[4838]: E1128 10:00:09.756761 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:10.256747167 +0000 UTC m=+181.955721327 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.766512 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-sdvg7"] Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.767394 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sdvg7" Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.769550 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.778373 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sdvg7"] Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.857159 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:09 crc kubenswrapper[4838]: E1128 10:00:09.857323 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:10.35729687 +0000 UTC m=+182.056271040 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.857365 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1f1e017-546a-4f0b-965e-bd050ad48e44-catalog-content\") pod \"certified-operators-sdvg7\" (UID: \"e1f1e017-546a-4f0b-965e-bd050ad48e44\") " pod="openshift-marketplace/certified-operators-sdvg7" Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.857404 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxlsh\" (UniqueName: \"kubernetes.io/projected/e1f1e017-546a-4f0b-965e-bd050ad48e44-kube-api-access-kxlsh\") pod \"certified-operators-sdvg7\" (UID: \"e1f1e017-546a-4f0b-965e-bd050ad48e44\") " pod="openshift-marketplace/certified-operators-sdvg7" Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.857500 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.857572 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1f1e017-546a-4f0b-965e-bd050ad48e44-utilities\") pod \"certified-operators-sdvg7\" (UID: \"e1f1e017-546a-4f0b-965e-bd050ad48e44\") " pod="openshift-marketplace/certified-operators-sdvg7" Nov 28 10:00:09 crc kubenswrapper[4838]: E1128 10:00:09.857975 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:10.357958918 +0000 UTC m=+182.056933088 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.958192 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:09 crc kubenswrapper[4838]: E1128 10:00:09.958346 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:10.458328024 +0000 UTC m=+182.157302194 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.958446 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1f1e017-546a-4f0b-965e-bd050ad48e44-utilities\") pod \"certified-operators-sdvg7\" (UID: \"e1f1e017-546a-4f0b-965e-bd050ad48e44\") " pod="openshift-marketplace/certified-operators-sdvg7" Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.958495 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1f1e017-546a-4f0b-965e-bd050ad48e44-catalog-content\") pod \"certified-operators-sdvg7\" (UID: \"e1f1e017-546a-4f0b-965e-bd050ad48e44\") " pod="openshift-marketplace/certified-operators-sdvg7" Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.958516 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxlsh\" (UniqueName: \"kubernetes.io/projected/e1f1e017-546a-4f0b-965e-bd050ad48e44-kube-api-access-kxlsh\") pod \"certified-operators-sdvg7\" (UID: \"e1f1e017-546a-4f0b-965e-bd050ad48e44\") " pod="openshift-marketplace/certified-operators-sdvg7" Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.958551 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:09 crc kubenswrapper[4838]: E1128 10:00:09.958826 4838 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:10.458818664 +0000 UTC m=+182.157792834 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.958957 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1f1e017-546a-4f0b-965e-bd050ad48e44-utilities\") pod \"certified-operators-sdvg7\" (UID: \"e1f1e017-546a-4f0b-965e-bd050ad48e44\") " pod="openshift-marketplace/certified-operators-sdvg7" Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.959040 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1f1e017-546a-4f0b-965e-bd050ad48e44-catalog-content\") pod \"certified-operators-sdvg7\" (UID: \"e1f1e017-546a-4f0b-965e-bd050ad48e44\") " pod="openshift-marketplace/certified-operators-sdvg7" Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.964219 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wpfps" event={"ID":"01a7faa9-fda8-4f56-b472-e9165b66dab9","Type":"ContainerStarted","Data":"b78747242812f7243eed9ee426fc6b8827dcc495c1f1d74f4d31165c0c301128"} Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.965710 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x4rrn" event={"ID":"e81eda87-1848-48b9-8a4d-6c1c184cb421","Type":"ContainerStarted","Data":"fe4c049ebf99274fc63cf597ceba696e393dffc3b4dd82d0b8bf8bf51f9afbad"} Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.965797 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6gkhp"] Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.967459 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-zj778" event={"ID":"02ae83a0-06cd-46f5-a5de-3127a57b8d5c","Type":"ContainerStarted","Data":"282dd50f30f783220779b5ee5e88abcb6c1b5dc5cb23814c3b77875752904ec9"} Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.967546 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6gkhp" Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.968247 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-9pfms" event={"ID":"39d8a22f-d5ef-427b-aae0-0e8faad08f6e","Type":"ContainerStarted","Data":"3b95e7802491c63f8d9370e0d0eee9f23fa0e0afb0fa6455d51c0ecb45539fc2"} Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.970023 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.970888 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-qfpq5" event={"ID":"8c869fc5-3a3b-41e2-8eac-4dc5835be740","Type":"ContainerStarted","Data":"c4a023beebd72f20f9d7b51d0e010a8e0eb90dd2bb289360936ccea309a43864"} Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.972956 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pnngr" event={"ID":"da520ae0-3472-44e1-af12-5e1fcdfec000","Type":"ContainerStarted","Data":"88b88462c4a9c070b4a15b7b67bfdde678552f5a2acd34aa95cf676b5db602ab"} Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.974794 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-zn7tf" event={"ID":"f16a2d31-72dd-4ec0-b516-a3d10b0b9aed","Type":"ContainerStarted","Data":"58247c06aa18c46f130e9d6159d85d9302296e69fdcb92f8b743a01c053b25b1"} Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.976488 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-kmvqb" event={"ID":"0ba0db9f-c351-49d0-a9c2-87b0edcfccff","Type":"ContainerStarted","Data":"fe6ed8be867664789fc4ee5108356076dab8402809c6267cacb0610c7b531a6c"} Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.977303 4838 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-xsjrl container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.977437 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl" podUID="8a281de3-cc12-4dd1-b9be-0ca03a0613ec" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.984764 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6gkhp"] Nov 28 10:00:09 crc kubenswrapper[4838]: I1128 10:00:09.997709 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxlsh\" (UniqueName: \"kubernetes.io/projected/e1f1e017-546a-4f0b-965e-bd050ad48e44-kube-api-access-kxlsh\") pod \"certified-operators-sdvg7\" (UID: \"e1f1e017-546a-4f0b-965e-bd050ad48e44\") " pod="openshift-marketplace/certified-operators-sdvg7" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.044531 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-kmvqb" podStartSLOduration=141.044518374 podStartE2EDuration="2m21.044518374s" 
podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:10.042477402 +0000 UTC m=+181.741451572" watchObservedRunningTime="2025-11-28 10:00:10.044518374 +0000 UTC m=+181.743492544" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.056044 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-qfpq5" podStartSLOduration=141.056029291 podStartE2EDuration="2m21.056029291s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:10.053684927 +0000 UTC m=+181.752659097" watchObservedRunningTime="2025-11-28 10:00:10.056029291 +0000 UTC m=+181.755003451" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.059905 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:10 crc kubenswrapper[4838]: E1128 10:00:10.060050 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:10.560032065 +0000 UTC m=+182.259006235 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.060248 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0-catalog-content\") pod \"community-operators-6gkhp\" (UID: \"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0\") " pod="openshift-marketplace/community-operators-6gkhp" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.060336 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwtjv\" (UniqueName: \"kubernetes.io/projected/0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0-kube-api-access-cwtjv\") pod \"community-operators-6gkhp\" (UID: \"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0\") " pod="openshift-marketplace/community-operators-6gkhp" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.060661 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0-utilities\") pod \"community-operators-6gkhp\" (UID: \"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0\") " pod="openshift-marketplace/community-operators-6gkhp" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.075955 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:10 crc kubenswrapper[4838]: E1128 10:00:10.077938 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:10.577924441 +0000 UTC m=+182.276898611 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.079439 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sdvg7" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.158472 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zlxs4"] Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.159377 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zlxs4" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.171931 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zlxs4"] Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.178156 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:10 crc kubenswrapper[4838]: E1128 10:00:10.178327 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:10.678305418 +0000 UTC m=+182.377279588 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.178471 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.178529 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0-catalog-content\") pod \"community-operators-6gkhp\" (UID: \"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0\") " pod="openshift-marketplace/community-operators-6gkhp" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.178552 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwtjv\" (UniqueName: \"kubernetes.io/projected/0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0-kube-api-access-cwtjv\") pod \"community-operators-6gkhp\" (UID: \"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0\") " pod="openshift-marketplace/community-operators-6gkhp" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.178596 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0-utilities\") pod \"community-operators-6gkhp\" (UID: \"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0\") " pod="openshift-marketplace/community-operators-6gkhp" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.179004 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0-utilities\") pod \"community-operators-6gkhp\" (UID: \"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0\") " pod="openshift-marketplace/community-operators-6gkhp" Nov 28 10:00:10 crc kubenswrapper[4838]: E1128 10:00:10.179252 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:10.679240746 +0000 UTC m=+182.378214916 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.179507 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0-catalog-content\") pod \"community-operators-6gkhp\" (UID: \"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0\") " pod="openshift-marketplace/community-operators-6gkhp" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.209853 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwtjv\" (UniqueName: \"kubernetes.io/projected/0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0-kube-api-access-cwtjv\") pod \"community-operators-6gkhp\" (UID: \"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0\") " pod="openshift-marketplace/community-operators-6gkhp" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.281178 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.281354 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d14771c4-48e3-4efe-a5f2-31331a30979f-catalog-content\") pod \"certified-operators-zlxs4\" (UID: \"d14771c4-48e3-4efe-a5f2-31331a30979f\") " pod="openshift-marketplace/certified-operators-zlxs4" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.281390 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d14771c4-48e3-4efe-a5f2-31331a30979f-utilities\") pod \"certified-operators-zlxs4\" (UID: \"d14771c4-48e3-4efe-a5f2-31331a30979f\") " pod="openshift-marketplace/certified-operators-zlxs4" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.281432 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vls5\" (UniqueName: \"kubernetes.io/projected/d14771c4-48e3-4efe-a5f2-31331a30979f-kube-api-access-4vls5\") pod \"certified-operators-zlxs4\" (UID: \"d14771c4-48e3-4efe-a5f2-31331a30979f\") " pod="openshift-marketplace/certified-operators-zlxs4" Nov 28 10:00:10 crc kubenswrapper[4838]: E1128 10:00:10.281546 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:10.781531331 +0000 UTC m=+182.480505501 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.288025 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6gkhp" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.303860 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 10:00:10 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld Nov 28 10:00:10 crc kubenswrapper[4838]: [+]process-running ok Nov 28 10:00:10 crc kubenswrapper[4838]: healthz check failed Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.303913 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.377643 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sdvg7"] Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.382602 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d14771c4-48e3-4efe-a5f2-31331a30979f-catalog-content\") pod \"certified-operators-zlxs4\" (UID: \"d14771c4-48e3-4efe-a5f2-31331a30979f\") " pod="openshift-marketplace/certified-operators-zlxs4" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.382648 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.382670 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d14771c4-48e3-4efe-a5f2-31331a30979f-utilities\") pod \"certified-operators-zlxs4\" (UID: \"d14771c4-48e3-4efe-a5f2-31331a30979f\") " pod="openshift-marketplace/certified-operators-zlxs4" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.382733 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vls5\" (UniqueName: \"kubernetes.io/projected/d14771c4-48e3-4efe-a5f2-31331a30979f-kube-api-access-4vls5\") pod \"certified-operators-zlxs4\" (UID: \"d14771c4-48e3-4efe-a5f2-31331a30979f\") " pod="openshift-marketplace/certified-operators-zlxs4" Nov 28 10:00:10 crc kubenswrapper[4838]: E1128 10:00:10.383058 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-28 10:00:10.883040563 +0000 UTC m=+182.582014733 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.383117 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d14771c4-48e3-4efe-a5f2-31331a30979f-utilities\") pod \"certified-operators-zlxs4\" (UID: \"d14771c4-48e3-4efe-a5f2-31331a30979f\") " pod="openshift-marketplace/certified-operators-zlxs4" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.383334 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d14771c4-48e3-4efe-a5f2-31331a30979f-catalog-content\") pod \"certified-operators-zlxs4\" (UID: \"d14771c4-48e3-4efe-a5f2-31331a30979f\") " pod="openshift-marketplace/certified-operators-zlxs4" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.395197 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bcnzk"] Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.396277 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bcnzk" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.406301 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vls5\" (UniqueName: \"kubernetes.io/projected/d14771c4-48e3-4efe-a5f2-31331a30979f-kube-api-access-4vls5\") pod \"certified-operators-zlxs4\" (UID: \"d14771c4-48e3-4efe-a5f2-31331a30979f\") " pod="openshift-marketplace/certified-operators-zlxs4" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.410153 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bcnzk"] Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.479009 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zlxs4" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.483435 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:10 crc kubenswrapper[4838]: E1128 10:00:10.483577 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:10.983554096 +0000 UTC m=+182.682528256 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.483660 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e35b5813-d7b9-4cbc-b002-44d465476046-catalog-content\") pod \"community-operators-bcnzk\" (UID: \"e35b5813-d7b9-4cbc-b002-44d465476046\") " pod="openshift-marketplace/community-operators-bcnzk" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.483689 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6g2v\" (UniqueName: \"kubernetes.io/projected/e35b5813-d7b9-4cbc-b002-44d465476046-kube-api-access-k6g2v\") pod \"community-operators-bcnzk\" (UID: \"e35b5813-d7b9-4cbc-b002-44d465476046\") " pod="openshift-marketplace/community-operators-bcnzk" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.483731 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.483763 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e35b5813-d7b9-4cbc-b002-44d465476046-utilities\") pod \"community-operators-bcnzk\" (UID: \"e35b5813-d7b9-4cbc-b002-44d465476046\") " pod="openshift-marketplace/community-operators-bcnzk" Nov 28 10:00:10 crc kubenswrapper[4838]: E1128 10:00:10.484025 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:10.984015294 +0000 UTC m=+182.682989474 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.585551 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:10 crc kubenswrapper[4838]: E1128 10:00:10.585741 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:11.085703164 +0000 UTC m=+182.784677324 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.585946 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e35b5813-d7b9-4cbc-b002-44d465476046-catalog-content\") pod \"community-operators-bcnzk\" (UID: \"e35b5813-d7b9-4cbc-b002-44d465476046\") " pod="openshift-marketplace/community-operators-bcnzk" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.585983 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6g2v\" (UniqueName: \"kubernetes.io/projected/e35b5813-d7b9-4cbc-b002-44d465476046-kube-api-access-k6g2v\") pod \"community-operators-bcnzk\" (UID: \"e35b5813-d7b9-4cbc-b002-44d465476046\") " pod="openshift-marketplace/community-operators-bcnzk" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.586024 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.586046 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e35b5813-d7b9-4cbc-b002-44d465476046-utilities\") pod \"community-operators-bcnzk\" (UID: \"e35b5813-d7b9-4cbc-b002-44d465476046\") " pod="openshift-marketplace/community-operators-bcnzk" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.586360 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/e35b5813-d7b9-4cbc-b002-44d465476046-catalog-content\") pod \"community-operators-bcnzk\" (UID: \"e35b5813-d7b9-4cbc-b002-44d465476046\") " pod="openshift-marketplace/community-operators-bcnzk" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.586505 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e35b5813-d7b9-4cbc-b002-44d465476046-utilities\") pod \"community-operators-bcnzk\" (UID: \"e35b5813-d7b9-4cbc-b002-44d465476046\") " pod="openshift-marketplace/community-operators-bcnzk" Nov 28 10:00:10 crc kubenswrapper[4838]: E1128 10:00:10.586535 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:11.086511078 +0000 UTC m=+182.785485288 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.613327 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6g2v\" (UniqueName: \"kubernetes.io/projected/e35b5813-d7b9-4cbc-b002-44d465476046-kube-api-access-k6g2v\") pod \"community-operators-bcnzk\" (UID: \"e35b5813-d7b9-4cbc-b002-44d465476046\") " pod="openshift-marketplace/community-operators-bcnzk" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.686691 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:10 crc kubenswrapper[4838]: E1128 10:00:10.687110 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:11.187087313 +0000 UTC m=+182.886061513 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.687217 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:10 crc kubenswrapper[4838]: E1128 10:00:10.687659 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:11.187645205 +0000 UTC m=+182.886619405 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.713638 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bcnzk" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.788765 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:10 crc kubenswrapper[4838]: E1128 10:00:10.788946 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:11.288919908 +0000 UTC m=+182.987894078 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.788998 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:10 crc kubenswrapper[4838]: E1128 10:00:10.789301 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:11.289289693 +0000 UTC m=+182.988263863 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.890374 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:10 crc kubenswrapper[4838]: E1128 10:00:10.890707 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:11.390693661 +0000 UTC m=+183.089667831 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.977588 4838 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-7l5mk container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.977640 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk" podUID="152c1627-347b-483f-aa32-a65e67de0e5e" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.34:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.978482 4838 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-df5f6 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.17:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.978511 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" podUID="8d6cc687-8b13-44b1-a15b-488c17e8b50c" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.17:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.986649 4838 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-z695f container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.13:8443/healthz\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.986701 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-z695f" podUID="fdac71d5-8b14-4c59-9d37-345456b26b36" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.13:8443/healthz\": dial tcp 10.217.0.13:8443: connect: connection refused" Nov 28 10:00:10 crc kubenswrapper[4838]: I1128 10:00:10.991678 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:10 crc kubenswrapper[4838]: E1128 10:00:10.991995 4838 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:11.491983486 +0000 UTC m=+183.190957656 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.093229 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:11 crc kubenswrapper[4838]: E1128 10:00:11.093566 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:11.593160435 +0000 UTC m=+183.292134645 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.094183 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:11 crc kubenswrapper[4838]: E1128 10:00:11.095821 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:11.595804533 +0000 UTC m=+183.294778703 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.157655 4838 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-z695f container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.13:8443/healthz\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.157751 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-z695f" podUID="fdac71d5-8b14-4c59-9d37-345456b26b36" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.13:8443/healthz\": dial tcp 10.217.0.13:8443: connect: connection refused" Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.157873 4838 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-z695f container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.13:8443/healthz\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.157977 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-z695f" podUID="fdac71d5-8b14-4c59-9d37-345456b26b36" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.13:8443/healthz\": dial tcp 10.217.0.13:8443: connect: connection refused" Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.196418 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:11 crc kubenswrapper[4838]: E1128 10:00:11.196834 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:11.696814885 +0000 UTC m=+183.395789055 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.284520 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 10:00:11 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld Nov 28 10:00:11 crc kubenswrapper[4838]: [+]process-running ok Nov 28 10:00:11 crc kubenswrapper[4838]: healthz check failed Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.284580 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.298022 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:11 crc kubenswrapper[4838]: E1128 10:00:11.298340 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:11.798325718 +0000 UTC m=+183.497299888 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.398950 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:11 crc kubenswrapper[4838]: E1128 10:00:11.399327 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:11.899307999 +0000 UTC m=+183.598282169 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.500127 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:11 crc kubenswrapper[4838]: E1128 10:00:11.500450 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:12.000435876 +0000 UTC m=+183.699410046 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.601231 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:11 crc kubenswrapper[4838]: E1128 10:00:11.601495 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:12.10147841 +0000 UTC m=+183.800452580 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.702599 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:11 crc kubenswrapper[4838]: E1128 10:00:11.702941 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:12.20292753 +0000 UTC m=+183.901901700 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.802982 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:11 crc kubenswrapper[4838]: E1128 10:00:11.803267 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:12.303249485 +0000 UTC m=+184.002223655 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.904465 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:11 crc kubenswrapper[4838]: E1128 10:00:11.904852 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:12.404839021 +0000 UTC m=+184.103813191 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.962043 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vmqxd"] Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.963110 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vmqxd" Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.965215 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.972432 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vmqxd"] Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.985020 4838 patch_prober.go:28] interesting pod/downloads-7954f5f757-2w9k9 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused" start-of-body= Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.985088 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2w9k9" podUID="ebabbe26-3c09-4a23-8dcc-aed864a3e4a4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused" Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.985668 4838 patch_prober.go:28] interesting pod/downloads-7954f5f757-2w9k9 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused" start-of-body= Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.985708 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-2w9k9" podUID="ebabbe26-3c09-4a23-8dcc-aed864a3e4a4" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused" Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.985964 4838 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-df5f6 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.17:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.986000 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" podUID="8d6cc687-8b13-44b1-a15b-488c17e8b50c" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.17:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.986180 4838 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-7l5mk container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 10:00:11 crc kubenswrapper[4838]: I1128 10:00:11.986222 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk" podUID="152c1627-347b-483f-aa32-a65e67de0e5e" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.34:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 10:00:12 crc 
kubenswrapper[4838]: I1128 10:00:12.005842 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.006405 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf9fc775-df38-4de1-b17a-d093a477938a-utilities\") pod \"redhat-marketplace-vmqxd\" (UID: \"bf9fc775-df38-4de1-b17a-d093a477938a\") " pod="openshift-marketplace/redhat-marketplace-vmqxd" Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.006479 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgsp8\" (UniqueName: \"kubernetes.io/projected/bf9fc775-df38-4de1-b17a-d093a477938a-kube-api-access-rgsp8\") pod \"redhat-marketplace-vmqxd\" (UID: \"bf9fc775-df38-4de1-b17a-d093a477938a\") " pod="openshift-marketplace/redhat-marketplace-vmqxd" Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.006552 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf9fc775-df38-4de1-b17a-d093a477938a-catalog-content\") pod \"redhat-marketplace-vmqxd\" (UID: \"bf9fc775-df38-4de1-b17a-d093a477938a\") " pod="openshift-marketplace/redhat-marketplace-vmqxd" Nov 28 10:00:12 crc kubenswrapper[4838]: E1128 10:00:12.006782 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:12.506754911 +0000 UTC m=+184.205729081 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.123256 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf9fc775-df38-4de1-b17a-d093a477938a-catalog-content\") pod \"redhat-marketplace-vmqxd\" (UID: \"bf9fc775-df38-4de1-b17a-d093a477938a\") " pod="openshift-marketplace/redhat-marketplace-vmqxd" Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.123311 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.123349 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf9fc775-df38-4de1-b17a-d093a477938a-utilities\") pod \"redhat-marketplace-vmqxd\" (UID: \"bf9fc775-df38-4de1-b17a-d093a477938a\") " pod="openshift-marketplace/redhat-marketplace-vmqxd" Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.123389 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgsp8\" (UniqueName: \"kubernetes.io/projected/bf9fc775-df38-4de1-b17a-d093a477938a-kube-api-access-rgsp8\") pod \"redhat-marketplace-vmqxd\" (UID: \"bf9fc775-df38-4de1-b17a-d093a477938a\") " pod="openshift-marketplace/redhat-marketplace-vmqxd" Nov 28 10:00:12 crc kubenswrapper[4838]: E1128 10:00:12.123837 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:12.623826316 +0000 UTC m=+184.322800486 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.124183 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf9fc775-df38-4de1-b17a-d093a477938a-utilities\") pod \"redhat-marketplace-vmqxd\" (UID: \"bf9fc775-df38-4de1-b17a-d093a477938a\") " pod="openshift-marketplace/redhat-marketplace-vmqxd" Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.125504 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf9fc775-df38-4de1-b17a-d093a477938a-catalog-content\") pod \"redhat-marketplace-vmqxd\" (UID: \"bf9fc775-df38-4de1-b17a-d093a477938a\") " pod="openshift-marketplace/redhat-marketplace-vmqxd" Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.143960 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgsp8\" (UniqueName: \"kubernetes.io/projected/bf9fc775-df38-4de1-b17a-d093a477938a-kube-api-access-rgsp8\") pod \"redhat-marketplace-vmqxd\" (UID: \"bf9fc775-df38-4de1-b17a-d093a477938a\") " pod="openshift-marketplace/redhat-marketplace-vmqxd" Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.224359 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:12 crc kubenswrapper[4838]: E1128 10:00:12.225042 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:12.725014505 +0000 UTC m=+184.423988715 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.285380 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 10:00:12 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld Nov 28 10:00:12 crc kubenswrapper[4838]: [+]process-running ok Nov 28 10:00:12 crc kubenswrapper[4838]: healthz check failed Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.285481 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.292236 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vmqxd" Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.328485 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:12 crc kubenswrapper[4838]: E1128 10:00:12.328978 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:12.828959417 +0000 UTC m=+184.527933617 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.364177 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-d2lkr"] Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.368164 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d2lkr" Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.370476 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d2lkr"] Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.430020 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:12 crc kubenswrapper[4838]: E1128 10:00:12.430167 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:12.930143087 +0000 UTC m=+184.629117257 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.430394 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ee3d402-f8eb-4319-aacc-f5c15fd7dc49-utilities\") pod \"redhat-marketplace-d2lkr\" (UID: \"0ee3d402-f8eb-4319-aacc-f5c15fd7dc49\") " pod="openshift-marketplace/redhat-marketplace-d2lkr" Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.430444 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.430571 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ee3d402-f8eb-4319-aacc-f5c15fd7dc49-catalog-content\") pod \"redhat-marketplace-d2lkr\" (UID: \"0ee3d402-f8eb-4319-aacc-f5c15fd7dc49\") " pod="openshift-marketplace/redhat-marketplace-d2lkr" Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.430609 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbcft\" (UniqueName: \"kubernetes.io/projected/0ee3d402-f8eb-4319-aacc-f5c15fd7dc49-kube-api-access-sbcft\") pod \"redhat-marketplace-d2lkr\" (UID: \"0ee3d402-f8eb-4319-aacc-f5c15fd7dc49\") " pod="openshift-marketplace/redhat-marketplace-d2lkr" Nov 28 10:00:12 crc kubenswrapper[4838]: E1128 10:00:12.431078 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:12.931058284 +0000 UTC m=+184.630032484 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.531998 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.532378 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ee3d402-f8eb-4319-aacc-f5c15fd7dc49-catalog-content\") pod \"redhat-marketplace-d2lkr\" (UID: \"0ee3d402-f8eb-4319-aacc-f5c15fd7dc49\") " pod="openshift-marketplace/redhat-marketplace-d2lkr" Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.532413 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbcft\" (UniqueName: \"kubernetes.io/projected/0ee3d402-f8eb-4319-aacc-f5c15fd7dc49-kube-api-access-sbcft\") pod \"redhat-marketplace-d2lkr\" (UID: \"0ee3d402-f8eb-4319-aacc-f5c15fd7dc49\") " pod="openshift-marketplace/redhat-marketplace-d2lkr" Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.532459 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ee3d402-f8eb-4319-aacc-f5c15fd7dc49-utilities\") pod \"redhat-marketplace-d2lkr\" (UID: \"0ee3d402-f8eb-4319-aacc-f5c15fd7dc49\") " pod="openshift-marketplace/redhat-marketplace-d2lkr" Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.532910 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ee3d402-f8eb-4319-aacc-f5c15fd7dc49-utilities\") pod \"redhat-marketplace-d2lkr\" (UID: \"0ee3d402-f8eb-4319-aacc-f5c15fd7dc49\") " pod="openshift-marketplace/redhat-marketplace-d2lkr" Nov 28 10:00:12 crc kubenswrapper[4838]: E1128 10:00:12.532997 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:13.032978974 +0000 UTC m=+184.731953144 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.533251 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ee3d402-f8eb-4319-aacc-f5c15fd7dc49-catalog-content\") pod \"redhat-marketplace-d2lkr\" (UID: \"0ee3d402-f8eb-4319-aacc-f5c15fd7dc49\") " pod="openshift-marketplace/redhat-marketplace-d2lkr" Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.563564 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbcft\" (UniqueName: \"kubernetes.io/projected/0ee3d402-f8eb-4319-aacc-f5c15fd7dc49-kube-api-access-sbcft\") pod \"redhat-marketplace-d2lkr\" (UID: \"0ee3d402-f8eb-4319-aacc-f5c15fd7dc49\") " pod="openshift-marketplace/redhat-marketplace-d2lkr" Nov 28 10:00:12 crc kubenswrapper[4838]: E1128 10:00:12.634658 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:13.134638322 +0000 UTC m=+184.833612522 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.634158 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.699790 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d2lkr" Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.769165 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:12 crc kubenswrapper[4838]: E1128 10:00:12.769503 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:13.269472779 +0000 UTC m=+184.968446949 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.769788 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:12 crc kubenswrapper[4838]: E1128 10:00:12.770213 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:13.270198248 +0000 UTC m=+184.969172428 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.877075 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:12 crc kubenswrapper[4838]: E1128 10:00:12.877568 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:13.377540808 +0000 UTC m=+185.076514978 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.878399 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:12 crc kubenswrapper[4838]: E1128 10:00:12.881960 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:13.381945766 +0000 UTC m=+185.080919926 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.965257 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mcgzz"] Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.968218 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mcgzz" Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.970962 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.979248 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:12 crc kubenswrapper[4838]: E1128 10:00:12.979583 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:13.479568902 +0000 UTC m=+185.178543072 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:12 crc kubenswrapper[4838]: I1128 10:00:12.980261 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mcgzz"] Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.012080 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sdvg7" event={"ID":"e1f1e017-546a-4f0b-965e-bd050ad48e44","Type":"ContainerStarted","Data":"2d27f2b6be67f789c4d44de8532764dd99dbebc82d3e4a72a69a947a62caffad"} Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.080325 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs\") pod \"network-metrics-daemon-p69l6\" (UID: \"2a223cc8-af33-4e83-8bfc-2676c5700447\") " pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.080373 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mw6c\" (UniqueName: \"kubernetes.io/projected/ac5ded1f-10ca-4db6-b3a6-80f30f28cb34-kube-api-access-7mw6c\") pod \"redhat-operators-mcgzz\" (UID: \"ac5ded1f-10ca-4db6-b3a6-80f30f28cb34\") " pod="openshift-marketplace/redhat-operators-mcgzz" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.080395 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac5ded1f-10ca-4db6-b3a6-80f30f28cb34-catalog-content\") pod \"redhat-operators-mcgzz\" (UID: \"ac5ded1f-10ca-4db6-b3a6-80f30f28cb34\") " pod="openshift-marketplace/redhat-operators-mcgzz" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.080420 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac5ded1f-10ca-4db6-b3a6-80f30f28cb34-utilities\") pod \"redhat-operators-mcgzz\" (UID: \"ac5ded1f-10ca-4db6-b3a6-80f30f28cb34\") " pod="openshift-marketplace/redhat-operators-mcgzz" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.080451 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:13 crc kubenswrapper[4838]: E1128 10:00:13.080743 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:13.580699319 +0000 UTC m=+185.279673489 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.082128 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.098529 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2a223cc8-af33-4e83-8bfc-2676c5700447-metrics-certs\") pod \"network-metrics-daemon-p69l6\" (UID: \"2a223cc8-af33-4e83-8bfc-2676c5700447\") " pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.181346 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:13 crc kubenswrapper[4838]: E1128 10:00:13.181522 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:13.681496973 +0000 UTC m=+185.380471143 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.181622 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac5ded1f-10ca-4db6-b3a6-80f30f28cb34-utilities\") pod \"redhat-operators-mcgzz\" (UID: \"ac5ded1f-10ca-4db6-b3a6-80f30f28cb34\") " pod="openshift-marketplace/redhat-operators-mcgzz" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.181666 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.181742 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mw6c\" (UniqueName: \"kubernetes.io/projected/ac5ded1f-10ca-4db6-b3a6-80f30f28cb34-kube-api-access-7mw6c\") pod \"redhat-operators-mcgzz\" (UID: \"ac5ded1f-10ca-4db6-b3a6-80f30f28cb34\") " pod="openshift-marketplace/redhat-operators-mcgzz" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.181766 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac5ded1f-10ca-4db6-b3a6-80f30f28cb34-catalog-content\") pod \"redhat-operators-mcgzz\" (UID: \"ac5ded1f-10ca-4db6-b3a6-80f30f28cb34\") " pod="openshift-marketplace/redhat-operators-mcgzz" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.182131 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac5ded1f-10ca-4db6-b3a6-80f30f28cb34-utilities\") pod \"redhat-operators-mcgzz\" (UID: \"ac5ded1f-10ca-4db6-b3a6-80f30f28cb34\") " pod="openshift-marketplace/redhat-operators-mcgzz" Nov 28 10:00:13 crc kubenswrapper[4838]: E1128 10:00:13.182319 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:13.682301945 +0000 UTC m=+185.381276115 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.182452 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac5ded1f-10ca-4db6-b3a6-80f30f28cb34-catalog-content\") pod \"redhat-operators-mcgzz\" (UID: \"ac5ded1f-10ca-4db6-b3a6-80f30f28cb34\") " pod="openshift-marketplace/redhat-operators-mcgzz" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.196688 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mw6c\" (UniqueName: \"kubernetes.io/projected/ac5ded1f-10ca-4db6-b3a6-80f30f28cb34-kube-api-access-7mw6c\") pod \"redhat-operators-mcgzz\" (UID: \"ac5ded1f-10ca-4db6-b3a6-80f30f28cb34\") " pod="openshift-marketplace/redhat-operators-mcgzz" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.234363 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zlxs4"] Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.276510 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.283027 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:13 crc kubenswrapper[4838]: E1128 10:00:13.283341 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:13.783322158 +0000 UTC m=+185.482296328 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.285812 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 10:00:13 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld Nov 28 10:00:13 crc kubenswrapper[4838]: [+]process-running ok Nov 28 10:00:13 crc kubenswrapper[4838]: healthz check failed Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.285865 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.292224 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mcgzz" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.312584 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bcnzk"] Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.315836 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6gkhp"] Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.358320 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d2lkr"] Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.360733 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-n5fj2"] Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.361632 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-n5fj2" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.368875 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vmqxd"] Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.378268 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-n5fj2"] Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.379407 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.385137 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:13 crc kubenswrapper[4838]: E1128 10:00:13.387372 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:13.887341364 +0000 UTC m=+185.586315634 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.388273 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-p69l6" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.488181 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.488346 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8b6f770-e994-4bf8-92de-7e359cbe75a8-utilities\") pod \"redhat-operators-n5fj2\" (UID: \"d8b6f770-e994-4bf8-92de-7e359cbe75a8\") " pod="openshift-marketplace/redhat-operators-n5fj2" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.488381 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8b6f770-e994-4bf8-92de-7e359cbe75a8-catalog-content\") pod \"redhat-operators-n5fj2\" (UID: \"d8b6f770-e994-4bf8-92de-7e359cbe75a8\") " pod="openshift-marketplace/redhat-operators-n5fj2" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.488426 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9dlv\" (UniqueName: \"kubernetes.io/projected/d8b6f770-e994-4bf8-92de-7e359cbe75a8-kube-api-access-t9dlv\") pod \"redhat-operators-n5fj2\" (UID: \"d8b6f770-e994-4bf8-92de-7e359cbe75a8\") " pod="openshift-marketplace/redhat-operators-n5fj2" Nov 28 10:00:13 crc kubenswrapper[4838]: E1128 10:00:13.488561 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:13.988546654 +0000 UTC m=+185.687520824 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.590057 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8b6f770-e994-4bf8-92de-7e359cbe75a8-utilities\") pod \"redhat-operators-n5fj2\" (UID: \"d8b6f770-e994-4bf8-92de-7e359cbe75a8\") " pod="openshift-marketplace/redhat-operators-n5fj2" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.590150 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8b6f770-e994-4bf8-92de-7e359cbe75a8-catalog-content\") pod \"redhat-operators-n5fj2\" (UID: \"d8b6f770-e994-4bf8-92de-7e359cbe75a8\") " pod="openshift-marketplace/redhat-operators-n5fj2" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.590236 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9dlv\" (UniqueName: \"kubernetes.io/projected/d8b6f770-e994-4bf8-92de-7e359cbe75a8-kube-api-access-t9dlv\") pod \"redhat-operators-n5fj2\" (UID: \"d8b6f770-e994-4bf8-92de-7e359cbe75a8\") " pod="openshift-marketplace/redhat-operators-n5fj2" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.590296 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:13 crc kubenswrapper[4838]: E1128 10:00:13.590793 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:14.090770045 +0000 UTC m=+185.789744255 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.691657 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:13 crc kubenswrapper[4838]: E1128 10:00:13.691879 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:14.19184489 +0000 UTC m=+185.890819060 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.692088 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:13 crc kubenswrapper[4838]: E1128 10:00:13.692426 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:14.192418304 +0000 UTC m=+185.891392474 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.793668 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:13 crc kubenswrapper[4838]: E1128 10:00:13.793864 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:14.293833863 +0000 UTC m=+185.992808033 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.794206 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:13 crc kubenswrapper[4838]: E1128 10:00:13.794556 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:14.294546392 +0000 UTC m=+185.993520562 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.895682 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:13 crc kubenswrapper[4838]: E1128 10:00:13.895874 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:14.395833856 +0000 UTC m=+186.094808026 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.895989 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:13 crc kubenswrapper[4838]: E1128 10:00:13.896351 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:14.396340026 +0000 UTC m=+186.095314276 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.927492 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8b6f770-e994-4bf8-92de-7e359cbe75a8-catalog-content\") pod \"redhat-operators-n5fj2\" (UID: \"d8b6f770-e994-4bf8-92de-7e359cbe75a8\") " pod="openshift-marketplace/redhat-operators-n5fj2" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.927764 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8b6f770-e994-4bf8-92de-7e359cbe75a8-utilities\") pod \"redhat-operators-n5fj2\" (UID: \"d8b6f770-e994-4bf8-92de-7e359cbe75a8\") " pod="openshift-marketplace/redhat-operators-n5fj2" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.951019 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9dlv\" (UniqueName: \"kubernetes.io/projected/d8b6f770-e994-4bf8-92de-7e359cbe75a8-kube-api-access-t9dlv\") pod \"redhat-operators-n5fj2\" (UID: \"d8b6f770-e994-4bf8-92de-7e359cbe75a8\") " pod="openshift-marketplace/redhat-operators-n5fj2" Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.997262 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:13 crc kubenswrapper[4838]: E1128 10:00:13.997529 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:14.497508535 +0000 UTC m=+186.196482705 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:13 crc kubenswrapper[4838]: I1128 10:00:13.998297 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:13 crc kubenswrapper[4838]: E1128 10:00:13.998653 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-28 10:00:14.498645422 +0000 UTC m=+186.197619592 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:14 crc kubenswrapper[4838]: W1128 10:00:14.005828 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0ee3d402_f8eb_4319_aacc_f5c15fd7dc49.slice/crio-c18f6466dbcd92d29a9ccfeeaee982912a8d889d4580c1bbdd34a5f200ab62f8 WatchSource:0}: Error finding container c18f6466dbcd92d29a9ccfeeaee982912a8d889d4580c1bbdd34a5f200ab62f8: Status 404 returned error can't find the container with id c18f6466dbcd92d29a9ccfeeaee982912a8d889d4580c1bbdd34a5f200ab62f8 Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.008579 4838 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-xsjrl container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.008815 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl" podUID="8a281de3-cc12-4dd1-b9be-0ca03a0613ec" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.008981 4838 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-xsjrl container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.009020 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl" podUID="8a281de3-cc12-4dd1-b9be-0ca03a0613ec" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.31:8080/healthz\": dial tcp 10.217.0.31:8080: connect: connection refused" Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.032326 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vmqxd" event={"ID":"bf9fc775-df38-4de1-b17a-d093a477938a","Type":"ContainerStarted","Data":"5a4a520c7f7c3e0bcb780656a962e2d4d97466f3b309ebc7b9dbd9cf91ce80f9"} Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.041987 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zlxs4" event={"ID":"d14771c4-48e3-4efe-a5f2-31331a30979f","Type":"ContainerStarted","Data":"ae1a562ed874232481cd075629141113a8b15940346070ef252389c2165e3dc2"} Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.043985 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bcnzk" 
event={"ID":"e35b5813-d7b9-4cbc-b002-44d465476046","Type":"ContainerStarted","Data":"c4dd42833b4b184ec1387bdd3e57b17f3cfb7cfffd87fdd66e51ba314b8e583e"} Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.045470 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gkhp" event={"ID":"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0","Type":"ContainerStarted","Data":"4ca4591549ce38fb1e07a464118ff15d26bb366ff930a6274bfecb026e7e2bc5"} Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.059303 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2lkr" event={"ID":"0ee3d402-f8eb-4319-aacc-f5c15fd7dc49","Type":"ContainerStarted","Data":"c18f6466dbcd92d29a9ccfeeaee982912a8d889d4580c1bbdd34a5f200ab62f8"} Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.099977 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:14 crc kubenswrapper[4838]: E1128 10:00:14.100701 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:14.600687236 +0000 UTC m=+186.299661396 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.174228 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-z695f" Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.202844 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-lvtzk" Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.204411 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:14 crc kubenswrapper[4838]: E1128 10:00:14.204790 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:14.704779293 +0000 UTC m=+186.403753463 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.205263 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-lvtzk" Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.207696 4838 patch_prober.go:28] interesting pod/console-f9d7485db-lvtzk container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.23:8443/health\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.207766 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-lvtzk" podUID="9fb065c7-1402-4294-a8f6-f1aa662ecbb0" containerName="console" probeResult="failure" output="Get \"https://10.217.0.23:8443/health\": dial tcp 10.217.0.23:8443: connect: connection refused" Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.229353 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-n5fj2" Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.282508 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-zmc4g" Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.287795 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 10:00:14 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld Nov 28 10:00:14 crc kubenswrapper[4838]: [+]process-running ok Nov 28 10:00:14 crc kubenswrapper[4838]: healthz check failed Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.287850 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.305705 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:14 crc kubenswrapper[4838]: E1128 10:00:14.306726 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:14.806695123 +0000 UTC m=+186.505669293 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.407339 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:14 crc kubenswrapper[4838]: E1128 10:00:14.408912 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:14.908900854 +0000 UTC m=+186.607875014 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.509326 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:14 crc kubenswrapper[4838]: E1128 10:00:14.509789 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:15.009771971 +0000 UTC m=+186.708746151 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.575015 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7l5mk" Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.623512 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:14 crc kubenswrapper[4838]: E1128 10:00:14.623807 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:15.123795322 +0000 UTC m=+186.822769492 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.644696 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-p69l6"] Nov 28 10:00:14 crc kubenswrapper[4838]: W1128 10:00:14.660566 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2a223cc8_af33_4e83_8bfc_2676c5700447.slice/crio-7b96371626aad69de4b218429b160c47084469413681b16baa698dd4a35c9881 WatchSource:0}: Error finding container 7b96371626aad69de4b218429b160c47084469413681b16baa698dd4a35c9881: Status 404 returned error can't find the container with id 7b96371626aad69de4b218429b160c47084469413681b16baa698dd4a35c9881 Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.687014 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mcgzz"] Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.728634 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:14 crc kubenswrapper[4838]: E1128 10:00:14.729971 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-28 10:00:15.229954944 +0000 UTC m=+186.928929114 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.830219 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:14 crc kubenswrapper[4838]: E1128 10:00:14.831343 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:15.331326901 +0000 UTC m=+187.030301071 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.860339 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-n5fj2"] Nov 28 10:00:14 crc kubenswrapper[4838]: I1128 10:00:14.933679 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:14 crc kubenswrapper[4838]: E1128 10:00:14.933980 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:15.43396007 +0000 UTC m=+187.132934240 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.035829 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:15 crc kubenswrapper[4838]: E1128 10:00:15.036220 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:15.536207632 +0000 UTC m=+187.235181792 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.067178 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-p69l6" event={"ID":"2a223cc8-af33-4e83-8bfc-2676c5700447","Type":"ContainerStarted","Data":"7b96371626aad69de4b218429b160c47084469413681b16baa698dd4a35c9881"} Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.079117 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-zj778" event={"ID":"02ae83a0-06cd-46f5-a5de-3127a57b8d5c","Type":"ContainerStarted","Data":"88b56513d369be5b81346f1206cb5fbfaade40ffef37b199096f004940105b72"} Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.083105 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gkhp" event={"ID":"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0","Type":"ContainerStarted","Data":"dfdb37a7ca77c79d0676f1cad327232e1a104e83ada4ee6a9ebcb5e583376643"} Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.085321 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-bs82t" event={"ID":"d91a5fa7-8c8e-455a-a237-e0ec0baa2197","Type":"ContainerStarted","Data":"525095f8fd0d0f7f182ecafdb76bae56d895a211633c2e26611a885e073d55cf"} Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.087748 4838 generic.go:334] "Generic (PLEG): container finished" podID="d14771c4-48e3-4efe-a5f2-31331a30979f" containerID="0ea25c5002bbff70fed0fbf0c911698d78650d3ef0cc02b83426c721fb2e5b8c" exitCode=0 Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.087804 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zlxs4" 
event={"ID":"d14771c4-48e3-4efe-a5f2-31331a30979f","Type":"ContainerDied","Data":"0ea25c5002bbff70fed0fbf0c911698d78650d3ef0cc02b83426c721fb2e5b8c"} Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.089608 4838 generic.go:334] "Generic (PLEG): container finished" podID="e1f1e017-546a-4f0b-965e-bd050ad48e44" containerID="d775e52040ce98e7e6734093f7423db819e3bec331e24658ecf5056e3df9bb7f" exitCode=0 Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.089666 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sdvg7" event={"ID":"e1f1e017-546a-4f0b-965e-bd050ad48e44","Type":"ContainerDied","Data":"d775e52040ce98e7e6734093f7423db819e3bec331e24658ecf5056e3df9bb7f"} Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.089844 4838 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.092746 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bcnzk" event={"ID":"e35b5813-d7b9-4cbc-b002-44d465476046","Type":"ContainerStarted","Data":"f0db1cd3861414b9fe53eb897c0ed154e2a22135be0e3be34adbeb76a16a56a1"} Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.094011 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n5fj2" event={"ID":"d8b6f770-e994-4bf8-92de-7e359cbe75a8","Type":"ContainerStarted","Data":"78417b97ae79fca8e27906fe79e31a84bfc4b0e15126842c884c7514e94c87fc"} Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.095422 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mcgzz" event={"ID":"ac5ded1f-10ca-4db6-b3a6-80f30f28cb34","Type":"ContainerStarted","Data":"4816a76d064a60017fefb4b0c4485525b3504b57e3381c3881b2b23c5308ff60"} Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.097694 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" event={"ID":"69a82003-94b3-4aaa-9904-8485cfa5f662","Type":"ContainerStarted","Data":"9e9e2b86881e949933c85ba66053853b1df0d6a93dfcd030bcf059ebea1bf8ca"} Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.099462 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vmqxd" event={"ID":"bf9fc775-df38-4de1-b17a-d093a477938a","Type":"ContainerStarted","Data":"ebc74bd129c2ec0bc04d0f54fe512529c50b565a4b308f18d62e36aa273af4dc"} Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.100834 4838 generic.go:334] "Generic (PLEG): container finished" podID="89ca4c63-f97b-4a71-8b2e-613170fdde6b" containerID="b99401cb44d9c0221a17a49233459c6b66892d5c9d6f4fa120c97998fd551abe" exitCode=0 Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.100901 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp" event={"ID":"89ca4c63-f97b-4a71-8b2e-613170fdde6b","Type":"ContainerDied","Data":"b99401cb44d9c0221a17a49233459c6b66892d5c9d6f4fa120c97998fd551abe"} Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.135495 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" podStartSLOduration=146.135471104 podStartE2EDuration="2m26.135471104s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-28 10:00:15.122587991 +0000 UTC m=+186.821562161" watchObservedRunningTime="2025-11-28 10:00:15.135471104 +0000 UTC m=+186.834445274" Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.146017 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:15 crc kubenswrapper[4838]: E1128 10:00:15.146122 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:15.646100015 +0000 UTC m=+187.345074185 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.146666 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:15 crc kubenswrapper[4838]: E1128 10:00:15.147987 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:15.647975762 +0000 UTC m=+187.346950032 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.247878 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:15 crc kubenswrapper[4838]: E1128 10:00:15.248531 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:15.748502805 +0000 UTC m=+187.447476995 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.287205 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 10:00:15 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld Nov 28 10:00:15 crc kubenswrapper[4838]: [+]process-running ok Nov 28 10:00:15 crc kubenswrapper[4838]: healthz check failed Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.287294 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.350596 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:15 crc kubenswrapper[4838]: E1128 10:00:15.351275 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:15.851240458 +0000 UTC m=+187.550214668 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.406430 4838 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.451643 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:15 crc kubenswrapper[4838]: E1128 10:00:15.452214 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-28 10:00:15.952170057 +0000 UTC m=+187.651144267 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.553769 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:15 crc kubenswrapper[4838]: E1128 10:00:15.554277 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:16.054257753 +0000 UTC m=+187.753231933 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.654382 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:15 crc kubenswrapper[4838]: E1128 10:00:15.654554 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:16.154529216 +0000 UTC m=+187.853503396 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.655023 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:15 crc kubenswrapper[4838]: E1128 10:00:15.655339 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:16.155326408 +0000 UTC m=+187.854300578 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.756312 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:15 crc kubenswrapper[4838]: E1128 10:00:15.756481 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 10:00:16.256454355 +0000 UTC m=+187.955428525 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.756809 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:15 crc kubenswrapper[4838]: E1128 10:00:15.757133 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 10:00:16.257125762 +0000 UTC m=+187.956099932 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8b7z5" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.772384 4838 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-28T10:00:15.40646322Z","Handler":null,"Name":""} Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.774928 4838 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.774983 4838 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.857600 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.861081 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 28 10:00:15 crc kubenswrapper[4838]: I1128 10:00:15.958672 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:16 crc kubenswrapper[4838]: I1128 10:00:16.108333 4838 generic.go:334] "Generic (PLEG): container finished" podID="bf9fc775-df38-4de1-b17a-d093a477938a" containerID="ebc74bd129c2ec0bc04d0f54fe512529c50b565a4b308f18d62e36aa273af4dc" exitCode=0 Nov 28 10:00:16 crc kubenswrapper[4838]: I1128 10:00:16.108417 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vmqxd" event={"ID":"bf9fc775-df38-4de1-b17a-d093a477938a","Type":"ContainerDied","Data":"ebc74bd129c2ec0bc04d0f54fe512529c50b565a4b308f18d62e36aa273af4dc"} Nov 28 10:00:16 crc kubenswrapper[4838]: I1128 10:00:16.110101 4838 generic.go:334] "Generic (PLEG): container finished" podID="0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0" containerID="dfdb37a7ca77c79d0676f1cad327232e1a104e83ada4ee6a9ebcb5e583376643" exitCode=0 Nov 28 10:00:16 crc kubenswrapper[4838]: I1128 10:00:16.110174 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gkhp" event={"ID":"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0","Type":"ContainerDied","Data":"dfdb37a7ca77c79d0676f1cad327232e1a104e83ada4ee6a9ebcb5e583376643"} Nov 28 10:00:16 crc kubenswrapper[4838]: I1128 10:00:16.111760 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2lkr" event={"ID":"0ee3d402-f8eb-4319-aacc-f5c15fd7dc49","Type":"ContainerStarted","Data":"60e3743cadc69a9e8179b5f5c13e5c3d2d0c9906af45a4ee648713a7ba854bbf"} Nov 28 10:00:16 crc kubenswrapper[4838]: I1128 10:00:16.288215 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 10:00:16 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld Nov 28 10:00:16 crc kubenswrapper[4838]: [+]process-running ok Nov 28 10:00:16 crc kubenswrapper[4838]: healthz check failed Nov 28 10:00:16 crc kubenswrapper[4838]: I1128 10:00:16.288579 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 10:00:16 crc kubenswrapper[4838]: I1128 10:00:16.418597 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp" Nov 28 10:00:16 crc kubenswrapper[4838]: I1128 10:00:16.470541 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/89ca4c63-f97b-4a71-8b2e-613170fdde6b-secret-volume\") pod \"89ca4c63-f97b-4a71-8b2e-613170fdde6b\" (UID: \"89ca4c63-f97b-4a71-8b2e-613170fdde6b\") " Nov 28 10:00:16 crc kubenswrapper[4838]: I1128 10:00:16.470624 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sf77v\" (UniqueName: \"kubernetes.io/projected/89ca4c63-f97b-4a71-8b2e-613170fdde6b-kube-api-access-sf77v\") pod \"89ca4c63-f97b-4a71-8b2e-613170fdde6b\" (UID: \"89ca4c63-f97b-4a71-8b2e-613170fdde6b\") " Nov 28 10:00:16 crc kubenswrapper[4838]: I1128 10:00:16.470664 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/89ca4c63-f97b-4a71-8b2e-613170fdde6b-config-volume\") pod \"89ca4c63-f97b-4a71-8b2e-613170fdde6b\" (UID: \"89ca4c63-f97b-4a71-8b2e-613170fdde6b\") " Nov 28 10:00:16 crc kubenswrapper[4838]: I1128 10:00:16.472243 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89ca4c63-f97b-4a71-8b2e-613170fdde6b-config-volume" (OuterVolumeSpecName: "config-volume") pod "89ca4c63-f97b-4a71-8b2e-613170fdde6b" (UID: "89ca4c63-f97b-4a71-8b2e-613170fdde6b"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:00:16 crc kubenswrapper[4838]: I1128 10:00:16.478457 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89ca4c63-f97b-4a71-8b2e-613170fdde6b-kube-api-access-sf77v" (OuterVolumeSpecName: "kube-api-access-sf77v") pod "89ca4c63-f97b-4a71-8b2e-613170fdde6b" (UID: "89ca4c63-f97b-4a71-8b2e-613170fdde6b"). InnerVolumeSpecName "kube-api-access-sf77v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:00:16 crc kubenswrapper[4838]: I1128 10:00:16.481196 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89ca4c63-f97b-4a71-8b2e-613170fdde6b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "89ca4c63-f97b-4a71-8b2e-613170fdde6b" (UID: "89ca4c63-f97b-4a71-8b2e-613170fdde6b"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:00:16 crc kubenswrapper[4838]: I1128 10:00:16.572227 4838 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/89ca4c63-f97b-4a71-8b2e-613170fdde6b-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 10:00:16 crc kubenswrapper[4838]: I1128 10:00:16.572277 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sf77v\" (UniqueName: \"kubernetes.io/projected/89ca4c63-f97b-4a71-8b2e-613170fdde6b-kube-api-access-sf77v\") on node \"crc\" DevicePath \"\"" Nov 28 10:00:16 crc kubenswrapper[4838]: I1128 10:00:16.572289 4838 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/89ca4c63-f97b-4a71-8b2e-613170fdde6b-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 10:00:16 crc kubenswrapper[4838]: I1128 10:00:16.578748 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 28 10:00:16 crc kubenswrapper[4838]: I1128 10:00:16.737843 4838 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 28 10:00:16 crc kubenswrapper[4838]: I1128 10:00:16.738183 4838 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:16 crc kubenswrapper[4838]: I1128 10:00:16.780650 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8b7z5\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:17 crc kubenswrapper[4838]: I1128 10:00:17.058333 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 28 10:00:17 crc kubenswrapper[4838]: I1128 10:00:17.067047 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:00:17 crc kubenswrapper[4838]: I1128 10:00:17.120266 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp" event={"ID":"89ca4c63-f97b-4a71-8b2e-613170fdde6b","Type":"ContainerDied","Data":"230976085bb66f3c5c6b9f39803c7d36885415f9627fb9d5e8734e8fe4b2a0f1"} Nov 28 10:00:17 crc kubenswrapper[4838]: I1128 10:00:17.120322 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="230976085bb66f3c5c6b9f39803c7d36885415f9627fb9d5e8734e8fe4b2a0f1" Nov 28 10:00:17 crc kubenswrapper[4838]: I1128 10:00:17.120374 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp" Nov 28 10:00:17 crc kubenswrapper[4838]: I1128 10:00:17.296054 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:17 crc kubenswrapper[4838]: I1128 10:00:17.296116 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:17 crc kubenswrapper[4838]: I1128 10:00:17.311967 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 10:00:17 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld Nov 28 10:00:17 crc kubenswrapper[4838]: [+]process-running ok Nov 28 10:00:17 crc kubenswrapper[4838]: healthz check failed Nov 28 10:00:17 crc kubenswrapper[4838]: I1128 10:00:17.312022 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 10:00:17 crc kubenswrapper[4838]: I1128 10:00:17.317417 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:17 crc kubenswrapper[4838]: I1128 10:00:17.663160 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-8b7z5"] Nov 28 10:00:17 crc kubenswrapper[4838]: I1128 10:00:17.969933 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 28 10:00:17 crc kubenswrapper[4838]: E1128 10:00:17.970374 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89ca4c63-f97b-4a71-8b2e-613170fdde6b" containerName="collect-profiles" Nov 28 10:00:17 crc kubenswrapper[4838]: I1128 10:00:17.970404 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="89ca4c63-f97b-4a71-8b2e-613170fdde6b" containerName="collect-profiles" Nov 28 10:00:17 crc kubenswrapper[4838]: I1128 10:00:17.970609 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="89ca4c63-f97b-4a71-8b2e-613170fdde6b" containerName="collect-profiles" Nov 28 10:00:17 crc kubenswrapper[4838]: I1128 10:00:17.971335 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 10:00:17 crc kubenswrapper[4838]: I1128 10:00:17.974006 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 28 10:00:17 crc kubenswrapper[4838]: I1128 10:00:17.974565 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 28 10:00:17 crc kubenswrapper[4838]: I1128 10:00:17.976107 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 28 10:00:18 crc kubenswrapper[4838]: I1128 10:00:18.017229 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3e324038-70bc-4a0e-a3a5-4176dd530fa0-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"3e324038-70bc-4a0e-a3a5-4176dd530fa0\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 10:00:18 crc kubenswrapper[4838]: I1128 10:00:18.017299 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3e324038-70bc-4a0e-a3a5-4176dd530fa0-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"3e324038-70bc-4a0e-a3a5-4176dd530fa0\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 10:00:18 crc kubenswrapper[4838]: I1128 10:00:18.118460 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3e324038-70bc-4a0e-a3a5-4176dd530fa0-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"3e324038-70bc-4a0e-a3a5-4176dd530fa0\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 10:00:18 crc kubenswrapper[4838]: I1128 10:00:18.118502 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3e324038-70bc-4a0e-a3a5-4176dd530fa0-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"3e324038-70bc-4a0e-a3a5-4176dd530fa0\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 10:00:18 crc kubenswrapper[4838]: I1128 10:00:18.118567 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3e324038-70bc-4a0e-a3a5-4176dd530fa0-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"3e324038-70bc-4a0e-a3a5-4176dd530fa0\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 10:00:18 crc kubenswrapper[4838]: I1128 10:00:18.130886 4838 generic.go:334] "Generic (PLEG): container finished" podID="e35b5813-d7b9-4cbc-b002-44d465476046" containerID="f0db1cd3861414b9fe53eb897c0ed154e2a22135be0e3be34adbeb76a16a56a1" exitCode=0 Nov 28 10:00:18 crc kubenswrapper[4838]: I1128 10:00:18.130970 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bcnzk" event={"ID":"e35b5813-d7b9-4cbc-b002-44d465476046","Type":"ContainerDied","Data":"f0db1cd3861414b9fe53eb897c0ed154e2a22135be0e3be34adbeb76a16a56a1"} Nov 28 10:00:18 crc kubenswrapper[4838]: I1128 10:00:18.136887 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" 
event={"ID":"6a71181f-49a4-4b69-a3e6-2413929b81dc","Type":"ContainerStarted","Data":"27b62c4fa8d7c8c5399493520456896c69662d33b5a028f8ba42508168f4ce8f"} Nov 28 10:00:18 crc kubenswrapper[4838]: I1128 10:00:18.141939 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3e324038-70bc-4a0e-a3a5-4176dd530fa0-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"3e324038-70bc-4a0e-a3a5-4176dd530fa0\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 10:00:18 crc kubenswrapper[4838]: I1128 10:00:18.143924 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-jvhxj" Nov 28 10:00:18 crc kubenswrapper[4838]: I1128 10:00:18.285705 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 10:00:18 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld Nov 28 10:00:18 crc kubenswrapper[4838]: [+]process-running ok Nov 28 10:00:18 crc kubenswrapper[4838]: healthz check failed Nov 28 10:00:18 crc kubenswrapper[4838]: I1128 10:00:18.285774 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 10:00:18 crc kubenswrapper[4838]: I1128 10:00:18.333393 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 10:00:18 crc kubenswrapper[4838]: I1128 10:00:18.834883 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 28 10:00:18 crc kubenswrapper[4838]: W1128 10:00:18.838800 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod3e324038_70bc_4a0e_a3a5_4176dd530fa0.slice/crio-dbf6a94a16ce268867074636eb880ea4dbae5f2bd4128996def4b4e13b15ee96 WatchSource:0}: Error finding container dbf6a94a16ce268867074636eb880ea4dbae5f2bd4128996def4b4e13b15ee96: Status 404 returned error can't find the container with id dbf6a94a16ce268867074636eb880ea4dbae5f2bd4128996def4b4e13b15ee96 Nov 28 10:00:19 crc kubenswrapper[4838]: I1128 10:00:19.065101 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-9pfms" Nov 28 10:00:19 crc kubenswrapper[4838]: I1128 10:00:19.155940 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-bs82t" event={"ID":"d91a5fa7-8c8e-455a-a237-e0ec0baa2197","Type":"ContainerStarted","Data":"c6ba8b52197dfa5af0505682dfd2e090775b5a2749c942b0b7a253bcdad4413b"} Nov 28 10:00:19 crc kubenswrapper[4838]: I1128 10:00:19.157097 4838 generic.go:334] "Generic (PLEG): container finished" podID="d8b6f770-e994-4bf8-92de-7e359cbe75a8" containerID="fa9315aadc1944769c86803802d324f970426c4b51cf3cb3407b157a61bb94ca" exitCode=0 Nov 28 10:00:19 crc kubenswrapper[4838]: I1128 10:00:19.157209 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n5fj2" event={"ID":"d8b6f770-e994-4bf8-92de-7e359cbe75a8","Type":"ContainerDied","Data":"fa9315aadc1944769c86803802d324f970426c4b51cf3cb3407b157a61bb94ca"} Nov 28 10:00:19 crc 
Nov 28 10:00:19 crc kubenswrapper[4838]: I1128 10:00:19.159452 4838 generic.go:334] "Generic (PLEG): container finished" podID="ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" containerID="1b656d0ecbb8cacf14c3701393397027a6e79c82dee8cbab0309b1ed5c34654f" exitCode=0
Nov 28 10:00:19 crc kubenswrapper[4838]: I1128 10:00:19.159568 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mcgzz" event={"ID":"ac5ded1f-10ca-4db6-b3a6-80f30f28cb34","Type":"ContainerDied","Data":"1b656d0ecbb8cacf14c3701393397027a6e79c82dee8cbab0309b1ed5c34654f"}
Nov 28 10:00:19 crc kubenswrapper[4838]: I1128 10:00:19.163301 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" event={"ID":"6a71181f-49a4-4b69-a3e6-2413929b81dc","Type":"ContainerStarted","Data":"c8e3c4c42af8dce8330d366d20ec3dbab413107d5610ec798984050696aae641"}
Nov 28 10:00:19 crc kubenswrapper[4838]: I1128 10:00:19.176316 4838 generic.go:334] "Generic (PLEG): container finished" podID="0ee3d402-f8eb-4319-aacc-f5c15fd7dc49" containerID="60e3743cadc69a9e8179b5f5c13e5c3d2d0c9906af45a4ee648713a7ba854bbf" exitCode=0
Nov 28 10:00:19 crc kubenswrapper[4838]: I1128 10:00:19.176394 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2lkr" event={"ID":"0ee3d402-f8eb-4319-aacc-f5c15fd7dc49","Type":"ContainerDied","Data":"60e3743cadc69a9e8179b5f5c13e5c3d2d0c9906af45a4ee648713a7ba854bbf"}
Nov 28 10:00:19 crc kubenswrapper[4838]: I1128 10:00:19.177317 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"3e324038-70bc-4a0e-a3a5-4176dd530fa0","Type":"ContainerStarted","Data":"dbf6a94a16ce268867074636eb880ea4dbae5f2bd4128996def4b4e13b15ee96"}
Nov 28 10:00:19 crc kubenswrapper[4838]: I1128 10:00:19.178911 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-p69l6" event={"ID":"2a223cc8-af33-4e83-8bfc-2676c5700447","Type":"ContainerStarted","Data":"ef7c0b17fb51b204a1f4402c4fa95faaf7087f01aeae0ace8e2429ca4c913e1b"}
Nov 28 10:00:19 crc kubenswrapper[4838]: I1128 10:00:19.189857 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-bs82t" podStartSLOduration=150.189841863 podStartE2EDuration="2m30.189841863s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:19.189801141 +0000 UTC m=+190.888775311" watchObservedRunningTime="2025-11-28 10:00:19.189841863 +0000 UTC m=+190.888816033"
Nov 28 10:00:19 crc kubenswrapper[4838]: I1128 10:00:19.289376 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 10:00:19 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld
Nov 28 10:00:19 crc kubenswrapper[4838]: [+]process-running ok
Nov 28 10:00:19 crc kubenswrapper[4838]: healthz check failed
Nov 28 10:00:19 crc kubenswrapper[4838]: I1128 10:00:19.289428 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 10:00:20 crc kubenswrapper[4838]: I1128 10:00:20.206765 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"3e324038-70bc-4a0e-a3a5-4176dd530fa0","Type":"ContainerStarted","Data":"0f7d0504d25acaab295b858617e94789a33a25ad806422d046d1100d68a453f4"}
Nov 28 10:00:20 crc kubenswrapper[4838]: I1128 10:00:20.217044 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-p69l6" event={"ID":"2a223cc8-af33-4e83-8bfc-2676c5700447","Type":"ContainerStarted","Data":"e99d91ee9f5b2f3e7a23c3bab216a2c84cde4f4cec7413630be9a1cb60e22a3a"}
Nov 28 10:00:20 crc kubenswrapper[4838]: I1128 10:00:20.243522 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=3.243488097 podStartE2EDuration="3.243488097s" podCreationTimestamp="2025-11-28 10:00:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:20.233081794 +0000 UTC m=+191.932055964" watchObservedRunningTime="2025-11-28 10:00:20.243488097 +0000 UTC m=+191.942462267"
Nov 28 10:00:20 crc kubenswrapper[4838]: I1128 10:00:20.251955 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-p69l6" podStartSLOduration=151.251926979 podStartE2EDuration="2m31.251926979s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:20.251658769 +0000 UTC m=+191.950632939" watchObservedRunningTime="2025-11-28 10:00:20.251926979 +0000 UTC m=+191.950901149"
Nov 28 10:00:20 crc kubenswrapper[4838]: I1128 10:00:20.275739 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-zj778" event={"ID":"02ae83a0-06cd-46f5-a5de-3127a57b8d5c","Type":"ContainerStarted","Data":"63ecbeb395cb98c3d12aa0519f1f73dd88136da9a28d9abb470c92620eb5b263"}
Nov 28 10:00:20 crc kubenswrapper[4838]: I1128 10:00:20.276138 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:20 crc kubenswrapper[4838]: I1128 10:00:20.290477 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 10:00:20 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld
Nov 28 10:00:20 crc kubenswrapper[4838]: [+]process-running ok
Nov 28 10:00:20 crc kubenswrapper[4838]: healthz check failed
Nov 28 10:00:20 crc kubenswrapper[4838]: I1128 10:00:20.290556 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 10:00:20 crc kubenswrapper[4838]: I1128 10:00:20.304953 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" podStartSLOduration=151.304850329 podStartE2EDuration="2m31.304850329s" podCreationTimestamp="2025-11-28 09:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:20.301548085 +0000 UTC m=+192.000522255" watchObservedRunningTime="2025-11-28 10:00:20.304850329 +0000 UTC m=+192.003824499"
Nov 28 10:00:20 crc kubenswrapper[4838]: I1128 10:00:20.364387 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-zj778" podStartSLOduration=20.364366826 podStartE2EDuration="20.364366826s" podCreationTimestamp="2025-11-28 10:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:00:20.360393055 +0000 UTC m=+192.059367225" watchObservedRunningTime="2025-11-28 10:00:20.364366826 +0000 UTC m=+192.063341006"
Nov 28 10:00:21 crc kubenswrapper[4838]: I1128 10:00:21.284755 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 10:00:21 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld
Nov 28 10:00:21 crc kubenswrapper[4838]: [+]process-running ok
Nov 28 10:00:21 crc kubenswrapper[4838]: healthz check failed
Nov 28 10:00:21 crc kubenswrapper[4838]: I1128 10:00:21.285020 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 10:00:21 crc kubenswrapper[4838]: I1128 10:00:21.300578 4838 generic.go:334] "Generic (PLEG): container finished" podID="3e324038-70bc-4a0e-a3a5-4176dd530fa0" containerID="0f7d0504d25acaab295b858617e94789a33a25ad806422d046d1100d68a453f4" exitCode=0
Nov 28 10:00:21 crc kubenswrapper[4838]: I1128 10:00:21.301425 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"3e324038-70bc-4a0e-a3a5-4176dd530fa0","Type":"ContainerDied","Data":"0f7d0504d25acaab295b858617e94789a33a25ad806422d046d1100d68a453f4"}
Nov 28 10:00:22 crc kubenswrapper[4838]: I1128 10:00:22.000939 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-2w9k9"
Nov 28 10:00:22 crc kubenswrapper[4838]: I1128 10:00:22.284631 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 10:00:22 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld
Nov 28 10:00:22 crc kubenswrapper[4838]: [+]process-running ok
Nov 28 10:00:22 crc kubenswrapper[4838]: healthz check failed
Nov 28 10:00:22 crc kubenswrapper[4838]: I1128 10:00:22.284697 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 10:00:22 crc kubenswrapper[4838]: I1128 10:00:22.427096 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 28 10:00:22 crc kubenswrapper[4838]: I1128 10:00:22.427959 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 28 10:00:22 crc kubenswrapper[4838]: I1128 10:00:22.430589 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 28 10:00:22 crc kubenswrapper[4838]: I1128 10:00:22.433539 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 28 10:00:22 crc kubenswrapper[4838]: I1128 10:00:22.436966 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 28 10:00:22 crc kubenswrapper[4838]: I1128 10:00:22.578931 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/67466f01-02c6-4e47-9ea7-2a012fa280aa-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"67466f01-02c6-4e47-9ea7-2a012fa280aa\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 28 10:00:22 crc kubenswrapper[4838]: I1128 10:00:22.578984 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/67466f01-02c6-4e47-9ea7-2a012fa280aa-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"67466f01-02c6-4e47-9ea7-2a012fa280aa\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 28 10:00:22 crc kubenswrapper[4838]: I1128 10:00:22.635385 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 28 10:00:22 crc kubenswrapper[4838]: I1128 10:00:22.680403 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/67466f01-02c6-4e47-9ea7-2a012fa280aa-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"67466f01-02c6-4e47-9ea7-2a012fa280aa\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 28 10:00:22 crc kubenswrapper[4838]: I1128 10:00:22.680453 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/67466f01-02c6-4e47-9ea7-2a012fa280aa-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"67466f01-02c6-4e47-9ea7-2a012fa280aa\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 28 10:00:22 crc kubenswrapper[4838]: I1128 10:00:22.680529 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/67466f01-02c6-4e47-9ea7-2a012fa280aa-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"67466f01-02c6-4e47-9ea7-2a012fa280aa\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 28 10:00:22 crc kubenswrapper[4838]: I1128 10:00:22.699161 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/67466f01-02c6-4e47-9ea7-2a012fa280aa-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"67466f01-02c6-4e47-9ea7-2a012fa280aa\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 28 10:00:22 crc kubenswrapper[4838]: I1128 10:00:22.745364 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 10:00:22 crc kubenswrapper[4838]: I1128 10:00:22.781625 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3e324038-70bc-4a0e-a3a5-4176dd530fa0-kube-api-access\") pod \"3e324038-70bc-4a0e-a3a5-4176dd530fa0\" (UID: \"3e324038-70bc-4a0e-a3a5-4176dd530fa0\") " Nov 28 10:00:22 crc kubenswrapper[4838]: I1128 10:00:22.781782 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3e324038-70bc-4a0e-a3a5-4176dd530fa0-kubelet-dir\") pod \"3e324038-70bc-4a0e-a3a5-4176dd530fa0\" (UID: \"3e324038-70bc-4a0e-a3a5-4176dd530fa0\") " Nov 28 10:00:22 crc kubenswrapper[4838]: I1128 10:00:22.781856 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3e324038-70bc-4a0e-a3a5-4176dd530fa0-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "3e324038-70bc-4a0e-a3a5-4176dd530fa0" (UID: "3e324038-70bc-4a0e-a3a5-4176dd530fa0"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:00:22 crc kubenswrapper[4838]: I1128 10:00:22.782124 4838 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3e324038-70bc-4a0e-a3a5-4176dd530fa0-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 10:00:22 crc kubenswrapper[4838]: I1128 10:00:22.785020 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e324038-70bc-4a0e-a3a5-4176dd530fa0-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "3e324038-70bc-4a0e-a3a5-4176dd530fa0" (UID: "3e324038-70bc-4a0e-a3a5-4176dd530fa0"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:00:22 crc kubenswrapper[4838]: I1128 10:00:22.883037 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3e324038-70bc-4a0e-a3a5-4176dd530fa0-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 10:00:23 crc kubenswrapper[4838]: I1128 10:00:23.115852 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 28 10:00:23 crc kubenswrapper[4838]: I1128 10:00:23.286833 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 10:00:23 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld Nov 28 10:00:23 crc kubenswrapper[4838]: [+]process-running ok Nov 28 10:00:23 crc kubenswrapper[4838]: healthz check failed Nov 28 10:00:23 crc kubenswrapper[4838]: I1128 10:00:23.286901 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 10:00:23 crc kubenswrapper[4838]: I1128 10:00:23.331912 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"3e324038-70bc-4a0e-a3a5-4176dd530fa0","Type":"ContainerDied","Data":"dbf6a94a16ce268867074636eb880ea4dbae5f2bd4128996def4b4e13b15ee96"} Nov 28 10:00:23 crc kubenswrapper[4838]: I1128 10:00:23.332221 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dbf6a94a16ce268867074636eb880ea4dbae5f2bd4128996def4b4e13b15ee96" Nov 28 10:00:23 crc kubenswrapper[4838]: I1128 10:00:23.332387 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 10:00:23 crc kubenswrapper[4838]: I1128 10:00:23.334135 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"67466f01-02c6-4e47-9ea7-2a012fa280aa","Type":"ContainerStarted","Data":"b90e6b4f1b4d0514c5e2c1652b08fbcfc5df3f025ce092404ae5204f9178a554"} Nov 28 10:00:23 crc kubenswrapper[4838]: I1128 10:00:23.880937 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:23 crc kubenswrapper[4838]: I1128 10:00:23.881256 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:23 crc kubenswrapper[4838]: I1128 10:00:23.888848 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:23 crc kubenswrapper[4838]: I1128 10:00:23.940565 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:00:23 crc kubenswrapper[4838]: I1128 10:00:23.940644 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:00:24 crc kubenswrapper[4838]: I1128 10:00:24.008894 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 10:00:24 crc kubenswrapper[4838]: I1128 10:00:24.013545 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl" Nov 28 10:00:24 crc kubenswrapper[4838]: I1128 10:00:24.201278 4838 patch_prober.go:28] interesting pod/console-f9d7485db-lvtzk container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.23:8443/health\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Nov 28 10:00:24 crc kubenswrapper[4838]: I1128 10:00:24.201350 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-lvtzk" podUID="9fb065c7-1402-4294-a8f6-f1aa662ecbb0" containerName="console" probeResult="failure" output="Get \"https://10.217.0.23:8443/health\": dial tcp 10.217.0.23:8443: connect: connection refused" Nov 28 10:00:24 crc kubenswrapper[4838]: I1128 10:00:24.285768 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 10:00:24 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld Nov 28 10:00:24 crc kubenswrapper[4838]: [+]process-running ok Nov 28 10:00:24 crc kubenswrapper[4838]: healthz check failed Nov 28 10:00:24 crc kubenswrapper[4838]: I1128 10:00:24.285844 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" 
containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 10:00:24 crc kubenswrapper[4838]: I1128 10:00:24.345446 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-bs82t" Nov 28 10:00:25 crc kubenswrapper[4838]: I1128 10:00:25.285483 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 10:00:25 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld Nov 28 10:00:25 crc kubenswrapper[4838]: [+]process-running ok Nov 28 10:00:25 crc kubenswrapper[4838]: healthz check failed Nov 28 10:00:25 crc kubenswrapper[4838]: I1128 10:00:25.285536 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 10:00:26 crc kubenswrapper[4838]: I1128 10:00:26.294105 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 10:00:26 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld Nov 28 10:00:26 crc kubenswrapper[4838]: [+]process-running ok Nov 28 10:00:26 crc kubenswrapper[4838]: healthz check failed Nov 28 10:00:26 crc kubenswrapper[4838]: I1128 10:00:26.294169 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 10:00:27 crc kubenswrapper[4838]: I1128 10:00:27.285165 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 10:00:27 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld Nov 28 10:00:27 crc kubenswrapper[4838]: [+]process-running ok Nov 28 10:00:27 crc kubenswrapper[4838]: healthz check failed Nov 28 10:00:27 crc kubenswrapper[4838]: I1128 10:00:27.285572 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 10:00:27 crc kubenswrapper[4838]: I1128 10:00:27.360764 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"67466f01-02c6-4e47-9ea7-2a012fa280aa","Type":"ContainerStarted","Data":"1d77adb4232b650cb621e6e36f9a1ee900bb819605bc5c1fd4f10287d751fda4"} Nov 28 10:00:28 crc kubenswrapper[4838]: I1128 10:00:28.285479 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 10:00:28 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld Nov 28 10:00:28 crc kubenswrapper[4838]: [+]process-running ok Nov 
Nov 28 10:00:28 crc kubenswrapper[4838]: I1128 10:00:28.285549 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 10:00:29 crc kubenswrapper[4838]: I1128 10:00:29.284477 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 10:00:29 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld
Nov 28 10:00:29 crc kubenswrapper[4838]: [+]process-running ok
Nov 28 10:00:29 crc kubenswrapper[4838]: healthz check failed
Nov 28 10:00:29 crc kubenswrapper[4838]: I1128 10:00:29.284560 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 10:00:30 crc kubenswrapper[4838]: I1128 10:00:30.285758 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 10:00:30 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld
Nov 28 10:00:30 crc kubenswrapper[4838]: [+]process-running ok
Nov 28 10:00:30 crc kubenswrapper[4838]: healthz check failed
Nov 28 10:00:30 crc kubenswrapper[4838]: I1128 10:00:30.286317 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 10:00:30 crc kubenswrapper[4838]: I1128 10:00:30.378964 4838 generic.go:334] "Generic (PLEG): container finished" podID="67466f01-02c6-4e47-9ea7-2a012fa280aa" containerID="1d77adb4232b650cb621e6e36f9a1ee900bb819605bc5c1fd4f10287d751fda4" exitCode=0
Nov 28 10:00:30 crc kubenswrapper[4838]: I1128 10:00:30.379006 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"67466f01-02c6-4e47-9ea7-2a012fa280aa","Type":"ContainerDied","Data":"1d77adb4232b650cb621e6e36f9a1ee900bb819605bc5c1fd4f10287d751fda4"}
Nov 28 10:00:31 crc kubenswrapper[4838]: I1128 10:00:31.285464 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 10:00:31 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld
Nov 28 10:00:31 crc kubenswrapper[4838]: [+]process-running ok
Nov 28 10:00:31 crc kubenswrapper[4838]: healthz check failed
Nov 28 10:00:31 crc kubenswrapper[4838]: I1128 10:00:31.285611 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 10:00:32 crc kubenswrapper[4838]: I1128 10:00:32.286271 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 10:00:32 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld
Nov 28 10:00:32 crc kubenswrapper[4838]: [+]process-running ok
Nov 28 10:00:32 crc kubenswrapper[4838]: healthz check failed
Nov 28 10:00:32 crc kubenswrapper[4838]: I1128 10:00:32.286649 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 10:00:33 crc kubenswrapper[4838]: I1128 10:00:33.286334 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 10:00:33 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld
Nov 28 10:00:33 crc kubenswrapper[4838]: [+]process-running ok
Nov 28 10:00:33 crc kubenswrapper[4838]: healthz check failed
Nov 28 10:00:33 crc kubenswrapper[4838]: I1128 10:00:33.286421 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 10:00:34 crc kubenswrapper[4838]: I1128 10:00:34.201550 4838 patch_prober.go:28] interesting pod/console-f9d7485db-lvtzk container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.23:8443/health\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body=
Nov 28 10:00:34 crc kubenswrapper[4838]: I1128 10:00:34.201637 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-lvtzk" podUID="9fb065c7-1402-4294-a8f6-f1aa662ecbb0" containerName="console" probeResult="failure" output="Get \"https://10.217.0.23:8443/health\": dial tcp 10.217.0.23:8443: connect: connection refused"
Nov 28 10:00:34 crc kubenswrapper[4838]: I1128 10:00:34.285623 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 10:00:34 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld
Nov 28 10:00:34 crc kubenswrapper[4838]: [+]process-running ok
Nov 28 10:00:34 crc kubenswrapper[4838]: healthz check failed
Nov 28 10:00:34 crc kubenswrapper[4838]: I1128 10:00:34.285688 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 10:00:35 crc kubenswrapper[4838]: I1128 10:00:35.286689 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 10:00:35 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld
Nov 28 10:00:35 crc kubenswrapper[4838]: [+]process-running ok
Nov 28 10:00:35 crc kubenswrapper[4838]: healthz check failed
Nov 28 10:00:35 crc kubenswrapper[4838]: I1128 10:00:35.287950 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 10:00:36 crc kubenswrapper[4838]: I1128 10:00:36.285042 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 10:00:36 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld
Nov 28 10:00:36 crc kubenswrapper[4838]: [+]process-running ok
Nov 28 10:00:36 crc kubenswrapper[4838]: healthz check failed
Nov 28 10:00:36 crc kubenswrapper[4838]: I1128 10:00:36.285120 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 10:00:37 crc kubenswrapper[4838]: I1128 10:00:37.074296 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5"
Nov 28 10:00:37 crc kubenswrapper[4838]: I1128 10:00:37.288783 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 10:00:37 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld
Nov 28 10:00:37 crc kubenswrapper[4838]: [+]process-running ok
Nov 28 10:00:37 crc kubenswrapper[4838]: healthz check failed
Nov 28 10:00:37 crc kubenswrapper[4838]: I1128 10:00:37.288891 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 10:00:38 crc kubenswrapper[4838]: I1128 10:00:38.287702 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 10:00:38 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld
Nov 28 10:00:38 crc kubenswrapper[4838]: [+]process-running ok
Nov 28 10:00:38 crc kubenswrapper[4838]: healthz check failed
Nov 28 10:00:38 crc kubenswrapper[4838]: I1128 10:00:38.287843 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 10:00:39 crc kubenswrapper[4838]: I1128 10:00:39.285331 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 10:00:39 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld
Nov 28 10:00:39 crc kubenswrapper[4838]: [+]process-running ok
Nov 28 10:00:39 crc kubenswrapper[4838]: healthz check failed
Nov 28 10:00:39 crc kubenswrapper[4838]: I1128 10:00:39.285481 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 10:00:40 crc kubenswrapper[4838]: I1128 10:00:40.288634 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 10:00:40 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld
Nov 28 10:00:40 crc kubenswrapper[4838]: [+]process-running ok
Nov 28 10:00:40 crc kubenswrapper[4838]: healthz check failed
Nov 28 10:00:40 crc kubenswrapper[4838]: I1128 10:00:40.288955 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 10:00:41 crc kubenswrapper[4838]: I1128 10:00:41.287489 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 10:00:41 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld
Nov 28 10:00:41 crc kubenswrapper[4838]: [+]process-running ok
Nov 28 10:00:41 crc kubenswrapper[4838]: healthz check failed
Nov 28 10:00:41 crc kubenswrapper[4838]: I1128 10:00:41.287584 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 10:00:41 crc kubenswrapper[4838]: I1128 10:00:41.735248 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 10:00:42 crc kubenswrapper[4838]: I1128 10:00:42.284572 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 10:00:42 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld
Nov 28 10:00:42 crc kubenswrapper[4838]: [+]process-running ok
Nov 28 10:00:42 crc kubenswrapper[4838]: healthz check failed
Nov 28 10:00:42 crc kubenswrapper[4838]: I1128 10:00:42.284659 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 10:00:43 crc kubenswrapper[4838]: I1128 10:00:43.285758 4838 patch_prober.go:28] interesting pod/router-default-5444994796-zmc4g container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 10:00:43 crc kubenswrapper[4838]: [-]has-synced failed: reason withheld
Nov 28 10:00:43 crc kubenswrapper[4838]: [+]process-running ok
Nov 28 10:00:43 crc kubenswrapper[4838]: healthz check failed
Nov 28 10:00:43 crc kubenswrapper[4838]: I1128 10:00:43.285825 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
kubenswrapper[4838]: I1128 10:00:43.285825 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zmc4g" podUID="e3505c3b-461c-489c-8af1-117b3cbc433b" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 10:00:44 crc kubenswrapper[4838]: I1128 10:00:44.098952 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 10:00:44 crc kubenswrapper[4838]: I1128 10:00:44.176185 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/67466f01-02c6-4e47-9ea7-2a012fa280aa-kubelet-dir\") pod \"67466f01-02c6-4e47-9ea7-2a012fa280aa\" (UID: \"67466f01-02c6-4e47-9ea7-2a012fa280aa\") " Nov 28 10:00:44 crc kubenswrapper[4838]: I1128 10:00:44.176267 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/67466f01-02c6-4e47-9ea7-2a012fa280aa-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "67466f01-02c6-4e47-9ea7-2a012fa280aa" (UID: "67466f01-02c6-4e47-9ea7-2a012fa280aa"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:00:44 crc kubenswrapper[4838]: I1128 10:00:44.176907 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/67466f01-02c6-4e47-9ea7-2a012fa280aa-kube-api-access\") pod \"67466f01-02c6-4e47-9ea7-2a012fa280aa\" (UID: \"67466f01-02c6-4e47-9ea7-2a012fa280aa\") " Nov 28 10:00:44 crc kubenswrapper[4838]: I1128 10:00:44.177227 4838 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/67466f01-02c6-4e47-9ea7-2a012fa280aa-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 10:00:44 crc kubenswrapper[4838]: I1128 10:00:44.184355 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67466f01-02c6-4e47-9ea7-2a012fa280aa-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "67466f01-02c6-4e47-9ea7-2a012fa280aa" (UID: "67466f01-02c6-4e47-9ea7-2a012fa280aa"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:00:44 crc kubenswrapper[4838]: I1128 10:00:44.204006 4838 patch_prober.go:28] interesting pod/console-f9d7485db-lvtzk container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.23:8443/health\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Nov 28 10:00:44 crc kubenswrapper[4838]: I1128 10:00:44.204067 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-lvtzk" podUID="9fb065c7-1402-4294-a8f6-f1aa662ecbb0" containerName="console" probeResult="failure" output="Get \"https://10.217.0.23:8443/health\": dial tcp 10.217.0.23:8443: connect: connection refused" Nov 28 10:00:44 crc kubenswrapper[4838]: I1128 10:00:44.255042 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-zn7tf" Nov 28 10:00:44 crc kubenswrapper[4838]: I1128 10:00:44.278489 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/67466f01-02c6-4e47-9ea7-2a012fa280aa-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 10:00:44 crc kubenswrapper[4838]: I1128 10:00:44.293982 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-zmc4g" Nov 28 10:00:44 crc kubenswrapper[4838]: I1128 10:00:44.297564 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-zmc4g" Nov 28 10:00:44 crc kubenswrapper[4838]: I1128 10:00:44.468248 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"67466f01-02c6-4e47-9ea7-2a012fa280aa","Type":"ContainerDied","Data":"b90e6b4f1b4d0514c5e2c1652b08fbcfc5df3f025ce092404ae5204f9178a554"} Nov 28 10:00:44 crc kubenswrapper[4838]: I1128 10:00:44.468307 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b90e6b4f1b4d0514c5e2c1652b08fbcfc5df3f025ce092404ae5204f9178a554" Nov 28 10:00:44 crc kubenswrapper[4838]: I1128 10:00:44.468380 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 10:00:53 crc kubenswrapper[4838]: I1128 10:00:53.941426 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:00:53 crc kubenswrapper[4838]: I1128 10:00:53.942078 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:00:54 crc kubenswrapper[4838]: I1128 10:00:54.202291 4838 patch_prober.go:28] interesting pod/console-f9d7485db-lvtzk container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.23:8443/health\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Nov 28 10:00:54 crc kubenswrapper[4838]: I1128 10:00:54.202377 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-lvtzk" podUID="9fb065c7-1402-4294-a8f6-f1aa662ecbb0" containerName="console" probeResult="failure" output="Get \"https://10.217.0.23:8443/health\": dial tcp 10.217.0.23:8443: connect: connection refused" Nov 28 10:00:54 crc kubenswrapper[4838]: I1128 10:00:54.232802 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 10:00:54 crc kubenswrapper[4838]: E1128 10:00:54.233094 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e324038-70bc-4a0e-a3a5-4176dd530fa0" containerName="pruner" Nov 28 10:00:54 crc kubenswrapper[4838]: I1128 10:00:54.233115 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e324038-70bc-4a0e-a3a5-4176dd530fa0" containerName="pruner" Nov 28 10:00:54 crc kubenswrapper[4838]: E1128 10:00:54.233136 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67466f01-02c6-4e47-9ea7-2a012fa280aa" containerName="pruner" Nov 28 10:00:54 crc kubenswrapper[4838]: I1128 10:00:54.233144 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="67466f01-02c6-4e47-9ea7-2a012fa280aa" containerName="pruner" Nov 28 10:00:54 crc kubenswrapper[4838]: I1128 10:00:54.233298 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e324038-70bc-4a0e-a3a5-4176dd530fa0" containerName="pruner" Nov 28 10:00:54 crc kubenswrapper[4838]: I1128 10:00:54.233314 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="67466f01-02c6-4e47-9ea7-2a012fa280aa" containerName="pruner" Nov 28 10:00:54 crc kubenswrapper[4838]: I1128 10:00:54.233864 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 10:00:54 crc kubenswrapper[4838]: I1128 10:00:54.237652 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 28 10:00:54 crc kubenswrapper[4838]: I1128 10:00:54.237664 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 28 10:00:54 crc kubenswrapper[4838]: I1128 10:00:54.247223 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 10:00:54 crc kubenswrapper[4838]: I1128 10:00:54.320894 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/66a94c0f-fb8e-4a92-bb75-5966a3b361c7-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"66a94c0f-fb8e-4a92-bb75-5966a3b361c7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 10:00:54 crc kubenswrapper[4838]: I1128 10:00:54.321033 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/66a94c0f-fb8e-4a92-bb75-5966a3b361c7-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"66a94c0f-fb8e-4a92-bb75-5966a3b361c7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 10:00:54 crc kubenswrapper[4838]: I1128 10:00:54.422111 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/66a94c0f-fb8e-4a92-bb75-5966a3b361c7-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"66a94c0f-fb8e-4a92-bb75-5966a3b361c7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 10:00:54 crc kubenswrapper[4838]: I1128 10:00:54.422194 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/66a94c0f-fb8e-4a92-bb75-5966a3b361c7-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"66a94c0f-fb8e-4a92-bb75-5966a3b361c7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 10:00:54 crc kubenswrapper[4838]: I1128 10:00:54.422273 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/66a94c0f-fb8e-4a92-bb75-5966a3b361c7-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"66a94c0f-fb8e-4a92-bb75-5966a3b361c7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 10:00:54 crc kubenswrapper[4838]: I1128 10:00:54.487238 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/66a94c0f-fb8e-4a92-bb75-5966a3b361c7-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"66a94c0f-fb8e-4a92-bb75-5966a3b361c7\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 10:00:54 crc kubenswrapper[4838]: I1128 10:00:54.605260 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 10:01:00 crc kubenswrapper[4838]: I1128 10:01:00.032661 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 10:01:00 crc kubenswrapper[4838]: I1128 10:01:00.034406 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 10:01:00 crc kubenswrapper[4838]: I1128 10:01:00.043961 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 10:01:00 crc kubenswrapper[4838]: I1128 10:01:00.226111 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b-kube-api-access\") pod \"installer-9-crc\" (UID: \"37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 10:01:00 crc kubenswrapper[4838]: I1128 10:01:00.226510 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b-kubelet-dir\") pod \"installer-9-crc\" (UID: \"37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 10:01:00 crc kubenswrapper[4838]: I1128 10:01:00.226635 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b-var-lock\") pod \"installer-9-crc\" (UID: \"37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 10:01:00 crc kubenswrapper[4838]: I1128 10:01:00.327753 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b-kube-api-access\") pod \"installer-9-crc\" (UID: \"37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 10:01:00 crc kubenswrapper[4838]: I1128 10:01:00.328163 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b-kubelet-dir\") pod \"installer-9-crc\" (UID: \"37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 10:01:00 crc kubenswrapper[4838]: I1128 10:01:00.328186 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b-var-lock\") pod \"installer-9-crc\" (UID: \"37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 10:01:00 crc kubenswrapper[4838]: I1128 10:01:00.328309 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b-var-lock\") pod \"installer-9-crc\" (UID: \"37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 10:01:00 crc kubenswrapper[4838]: I1128 10:01:00.328297 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b-kubelet-dir\") pod \"installer-9-crc\" (UID: \"37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 10:01:00 crc kubenswrapper[4838]: I1128 10:01:00.352988 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b-kube-api-access\") pod \"installer-9-crc\" (UID: 
\"37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 10:01:00 crc kubenswrapper[4838]: I1128 10:01:00.373435 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 10:01:04 crc kubenswrapper[4838]: I1128 10:01:04.211199 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-lvtzk" Nov 28 10:01:04 crc kubenswrapper[4838]: I1128 10:01:04.219824 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-lvtzk" Nov 28 10:01:17 crc kubenswrapper[4838]: E1128 10:01:17.381853 4838 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 28 10:01:17 crc kubenswrapper[4838]: E1128 10:01:17.383031 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rgsp8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-vmqxd_openshift-marketplace(bf9fc775-df38-4de1-b17a-d093a477938a): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 10:01:17 crc kubenswrapper[4838]: E1128 10:01:17.386217 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-vmqxd" podUID="bf9fc775-df38-4de1-b17a-d093a477938a" Nov 28 10:01:17 crc kubenswrapper[4838]: E1128 10:01:17.628181 4838 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" 
image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 28 10:01:17 crc kubenswrapper[4838]: E1128 10:01:17.628592 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sbcft,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-d2lkr_openshift-marketplace(0ee3d402-f8eb-4319-aacc-f5c15fd7dc49): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 10:01:17 crc kubenswrapper[4838]: E1128 10:01:17.629961 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-d2lkr" podUID="0ee3d402-f8eb-4319-aacc-f5c15fd7dc49" Nov 28 10:01:18 crc kubenswrapper[4838]: E1128 10:01:18.781302 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-d2lkr" podUID="0ee3d402-f8eb-4319-aacc-f5c15fd7dc49" Nov 28 10:01:18 crc kubenswrapper[4838]: E1128 10:01:18.883266 4838 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 28 10:01:18 crc kubenswrapper[4838]: E1128 10:01:18.883396 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cwtjv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-6gkhp_openshift-marketplace(0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 10:01:18 crc kubenswrapper[4838]: E1128 10:01:18.884685 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-6gkhp" podUID="0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0" Nov 28 10:01:21 crc kubenswrapper[4838]: E1128 10:01:21.875626 4838 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 28 10:01:21 crc kubenswrapper[4838]: E1128 10:01:21.876247 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-t9dlv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-n5fj2_openshift-marketplace(d8b6f770-e994-4bf8-92de-7e359cbe75a8): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 10:01:21 crc kubenswrapper[4838]: E1128 10:01:21.877330 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-n5fj2" podUID="d8b6f770-e994-4bf8-92de-7e359cbe75a8" Nov 28 10:01:21 crc kubenswrapper[4838]: E1128 10:01:21.891095 4838 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 28 10:01:21 crc kubenswrapper[4838]: E1128 10:01:21.891215 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-k6g2v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-bcnzk_openshift-marketplace(e35b5813-d7b9-4cbc-b002-44d465476046): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 10:01:21 crc kubenswrapper[4838]: E1128 10:01:21.892335 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-bcnzk" podUID="e35b5813-d7b9-4cbc-b002-44d465476046" Nov 28 10:01:23 crc kubenswrapper[4838]: I1128 10:01:23.940605 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:01:23 crc kubenswrapper[4838]: I1128 10:01:23.941162 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:01:23 crc kubenswrapper[4838]: I1128 10:01:23.941227 4838 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 10:01:23 crc kubenswrapper[4838]: I1128 10:01:23.942110 4838 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697"} pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 10:01:23 crc kubenswrapper[4838]: I1128 10:01:23.942208 4838 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" containerID="cri-o://5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697" gracePeriod=600 Nov 28 10:01:24 crc kubenswrapper[4838]: E1128 10:01:24.087899 4838 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 28 10:01:24 crc kubenswrapper[4838]: E1128 10:01:24.088349 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4vls5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-zlxs4_openshift-marketplace(d14771c4-48e3-4efe-a5f2-31331a30979f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 10:01:24 crc kubenswrapper[4838]: E1128 10:01:24.090372 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-zlxs4" podUID="d14771c4-48e3-4efe-a5f2-31331a30979f" Nov 28 10:01:24 crc kubenswrapper[4838]: E1128 10:01:24.094829 4838 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 28 10:01:24 crc kubenswrapper[4838]: E1128 10:01:24.094981 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7mw6c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-mcgzz_openshift-marketplace(ac5ded1f-10ca-4db6-b3a6-80f30f28cb34): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 10:01:24 crc kubenswrapper[4838]: E1128 10:01:24.096796 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-mcgzz" podUID="ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" Nov 28 10:01:24 crc kubenswrapper[4838]: E1128 10:01:24.132891 4838 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 28 10:01:24 crc kubenswrapper[4838]: E1128 10:01:24.133046 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kxlsh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-sdvg7_openshift-marketplace(e1f1e017-546a-4f0b-965e-bd050ad48e44): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 10:01:24 crc kubenswrapper[4838]: E1128 10:01:24.134236 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-sdvg7" podUID="e1f1e017-546a-4f0b-965e-bd050ad48e44" Nov 28 10:01:24 crc kubenswrapper[4838]: I1128 10:01:24.425229 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 10:01:24 crc kubenswrapper[4838]: W1128 10:01:24.431112 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod37cf39e1_d5d9_4b5d_a80b_5f9e371ebf2b.slice/crio-e0491cbad3921626d21754a6f7088ef16bc902d4f7c58271742a5054a08cefbb WatchSource:0}: Error finding container e0491cbad3921626d21754a6f7088ef16bc902d4f7c58271742a5054a08cefbb: Status 404 returned error can't find the container with id e0491cbad3921626d21754a6f7088ef16bc902d4f7c58271742a5054a08cefbb Nov 28 10:01:24 crc kubenswrapper[4838]: I1128 10:01:24.481108 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 10:01:24 crc kubenswrapper[4838]: I1128 10:01:24.694615 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b","Type":"ContainerStarted","Data":"fb1c4fc0af81b81e544452f3515780db57ee95ef051ddbca10f323baf3d05318"} Nov 28 10:01:24 crc kubenswrapper[4838]: I1128 10:01:24.694899 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b","Type":"ContainerStarted","Data":"e0491cbad3921626d21754a6f7088ef16bc902d4f7c58271742a5054a08cefbb"} Nov 28 10:01:24 crc kubenswrapper[4838]: I1128 10:01:24.696322 4838 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"66a94c0f-fb8e-4a92-bb75-5966a3b361c7","Type":"ContainerStarted","Data":"7cbd318321219344289e9f59a81b98a8362868c7e6c1ac93e005c37c656ffbd8"} Nov 28 10:01:24 crc kubenswrapper[4838]: I1128 10:01:24.698602 4838 generic.go:334] "Generic (PLEG): container finished" podID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerID="5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697" exitCode=0 Nov 28 10:01:24 crc kubenswrapper[4838]: I1128 10:01:24.698741 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerDied","Data":"5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697"} Nov 28 10:01:24 crc kubenswrapper[4838]: I1128 10:01:24.698787 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerStarted","Data":"b32d00e2222988bb06c20222520ec2748f8cbe0ff2a2fb2b4f993227ebd10de6"} Nov 28 10:01:24 crc kubenswrapper[4838]: E1128 10:01:24.700488 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-mcgzz" podUID="ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" Nov 28 10:01:24 crc kubenswrapper[4838]: E1128 10:01:24.700699 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-zlxs4" podUID="d14771c4-48e3-4efe-a5f2-31331a30979f" Nov 28 10:01:24 crc kubenswrapper[4838]: E1128 10:01:24.700770 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-sdvg7" podUID="e1f1e017-546a-4f0b-965e-bd050ad48e44" Nov 28 10:01:24 crc kubenswrapper[4838]: I1128 10:01:24.710402 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=24.710388236 podStartE2EDuration="24.710388236s" podCreationTimestamp="2025-11-28 10:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:01:24.708805743 +0000 UTC m=+256.407779913" watchObservedRunningTime="2025-11-28 10:01:24.710388236 +0000 UTC m=+256.409362406" Nov 28 10:01:25 crc kubenswrapper[4838]: I1128 10:01:25.129309 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-df5f6"] Nov 28 10:01:25 crc kubenswrapper[4838]: I1128 10:01:25.708516 4838 generic.go:334] "Generic (PLEG): container finished" podID="66a94c0f-fb8e-4a92-bb75-5966a3b361c7" containerID="7d7aafd59134e415623b1b2f3bc2ccbff5e5badb9e6ac07fc1a83400b25d79b8" exitCode=0 Nov 28 10:01:25 crc kubenswrapper[4838]: I1128 10:01:25.709108 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" 
event={"ID":"66a94c0f-fb8e-4a92-bb75-5966a3b361c7","Type":"ContainerDied","Data":"7d7aafd59134e415623b1b2f3bc2ccbff5e5badb9e6ac07fc1a83400b25d79b8"} Nov 28 10:01:27 crc kubenswrapper[4838]: I1128 10:01:27.069602 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 10:01:27 crc kubenswrapper[4838]: I1128 10:01:27.196753 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/66a94c0f-fb8e-4a92-bb75-5966a3b361c7-kube-api-access\") pod \"66a94c0f-fb8e-4a92-bb75-5966a3b361c7\" (UID: \"66a94c0f-fb8e-4a92-bb75-5966a3b361c7\") " Nov 28 10:01:27 crc kubenswrapper[4838]: I1128 10:01:27.196880 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/66a94c0f-fb8e-4a92-bb75-5966a3b361c7-kubelet-dir\") pod \"66a94c0f-fb8e-4a92-bb75-5966a3b361c7\" (UID: \"66a94c0f-fb8e-4a92-bb75-5966a3b361c7\") " Nov 28 10:01:27 crc kubenswrapper[4838]: I1128 10:01:27.197206 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/66a94c0f-fb8e-4a92-bb75-5966a3b361c7-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "66a94c0f-fb8e-4a92-bb75-5966a3b361c7" (UID: "66a94c0f-fb8e-4a92-bb75-5966a3b361c7"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:01:27 crc kubenswrapper[4838]: I1128 10:01:27.203564 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66a94c0f-fb8e-4a92-bb75-5966a3b361c7-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "66a94c0f-fb8e-4a92-bb75-5966a3b361c7" (UID: "66a94c0f-fb8e-4a92-bb75-5966a3b361c7"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:01:27 crc kubenswrapper[4838]: I1128 10:01:27.298381 4838 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/66a94c0f-fb8e-4a92-bb75-5966a3b361c7-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 10:01:27 crc kubenswrapper[4838]: I1128 10:01:27.298427 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/66a94c0f-fb8e-4a92-bb75-5966a3b361c7-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 10:01:27 crc kubenswrapper[4838]: I1128 10:01:27.721390 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"66a94c0f-fb8e-4a92-bb75-5966a3b361c7","Type":"ContainerDied","Data":"7cbd318321219344289e9f59a81b98a8362868c7e6c1ac93e005c37c656ffbd8"} Nov 28 10:01:27 crc kubenswrapper[4838]: I1128 10:01:27.721763 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7cbd318321219344289e9f59a81b98a8362868c7e6c1ac93e005c37c656ffbd8" Nov 28 10:01:27 crc kubenswrapper[4838]: I1128 10:01:27.721529 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 10:01:31 crc kubenswrapper[4838]: I1128 10:01:31.740556 4838 generic.go:334] "Generic (PLEG): container finished" podID="0ee3d402-f8eb-4319-aacc-f5c15fd7dc49" containerID="c29ae14836ef9149b36e9a72b46e41e75cd2687fdf0c1e9aade020df506564b5" exitCode=0 Nov 28 10:01:31 crc kubenswrapper[4838]: I1128 10:01:31.740637 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2lkr" event={"ID":"0ee3d402-f8eb-4319-aacc-f5c15fd7dc49","Type":"ContainerDied","Data":"c29ae14836ef9149b36e9a72b46e41e75cd2687fdf0c1e9aade020df506564b5"} Nov 28 10:01:31 crc kubenswrapper[4838]: I1128 10:01:31.744020 4838 generic.go:334] "Generic (PLEG): container finished" podID="bf9fc775-df38-4de1-b17a-d093a477938a" containerID="ac2cf7eb990edffbc429d7b339379188834ad1467899ad9966432ce3b01c9471" exitCode=0 Nov 28 10:01:31 crc kubenswrapper[4838]: I1128 10:01:31.744062 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vmqxd" event={"ID":"bf9fc775-df38-4de1-b17a-d093a477938a","Type":"ContainerDied","Data":"ac2cf7eb990edffbc429d7b339379188834ad1467899ad9966432ce3b01c9471"} Nov 28 10:01:32 crc kubenswrapper[4838]: I1128 10:01:32.751764 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2lkr" event={"ID":"0ee3d402-f8eb-4319-aacc-f5c15fd7dc49","Type":"ContainerStarted","Data":"0f83505fadacf0ae5cb7ebe921232bb27c966534070344446f9b5998ba127f60"} Nov 28 10:01:32 crc kubenswrapper[4838]: I1128 10:01:32.755524 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vmqxd" event={"ID":"bf9fc775-df38-4de1-b17a-d093a477938a","Type":"ContainerStarted","Data":"f008e458bc1dca0a2781deea356d38d890938c45045c1466a681fff535226ead"} Nov 28 10:01:32 crc kubenswrapper[4838]: I1128 10:01:32.767910 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-d2lkr" podStartSLOduration=7.482589354 podStartE2EDuration="1m20.767883927s" podCreationTimestamp="2025-11-28 10:00:12 +0000 UTC" firstStartedPulling="2025-11-28 10:00:19.183897132 +0000 UTC m=+190.882871292" lastFinishedPulling="2025-11-28 10:01:32.469191695 +0000 UTC m=+264.168165865" observedRunningTime="2025-11-28 10:01:32.765355924 +0000 UTC m=+264.464330104" watchObservedRunningTime="2025-11-28 10:01:32.767883927 +0000 UTC m=+264.466858097" Nov 28 10:01:32 crc kubenswrapper[4838]: I1128 10:01:32.784445 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vmqxd" podStartSLOduration=8.788360446 podStartE2EDuration="1m21.784422706s" podCreationTimestamp="2025-11-28 10:00:11 +0000 UTC" firstStartedPulling="2025-11-28 10:00:19.189943067 +0000 UTC m=+190.888917247" lastFinishedPulling="2025-11-28 10:01:32.186005347 +0000 UTC m=+263.884979507" observedRunningTime="2025-11-28 10:01:32.782341434 +0000 UTC m=+264.481315604" watchObservedRunningTime="2025-11-28 10:01:32.784422706 +0000 UTC m=+264.483396876" Nov 28 10:01:36 crc kubenswrapper[4838]: I1128 10:01:36.778197 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gkhp" event={"ID":"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0","Type":"ContainerStarted","Data":"0c8cdc81d32988e542c95a81369f654dcc3d55dce34a8d6da5f27324fd237065"} Nov 28 10:01:37 crc kubenswrapper[4838]: I1128 10:01:37.784612 4838 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bcnzk" event={"ID":"e35b5813-d7b9-4cbc-b002-44d465476046","Type":"ContainerStarted","Data":"c062455fc7b8ac59bdd8a3d3cbe4f31c3dab14cbc0e10712ff9eae9ea5248808"} Nov 28 10:01:37 crc kubenswrapper[4838]: I1128 10:01:37.786865 4838 generic.go:334] "Generic (PLEG): container finished" podID="0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0" containerID="0c8cdc81d32988e542c95a81369f654dcc3d55dce34a8d6da5f27324fd237065" exitCode=0 Nov 28 10:01:37 crc kubenswrapper[4838]: I1128 10:01:37.786906 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gkhp" event={"ID":"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0","Type":"ContainerDied","Data":"0c8cdc81d32988e542c95a81369f654dcc3d55dce34a8d6da5f27324fd237065"} Nov 28 10:01:38 crc kubenswrapper[4838]: I1128 10:01:38.794071 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sdvg7" event={"ID":"e1f1e017-546a-4f0b-965e-bd050ad48e44","Type":"ContainerStarted","Data":"9aef0bb53412b66d5aaff4ca64cd815054b8c645bca80ad62eb81ee6052c9736"} Nov 28 10:01:38 crc kubenswrapper[4838]: I1128 10:01:38.796112 4838 generic.go:334] "Generic (PLEG): container finished" podID="e35b5813-d7b9-4cbc-b002-44d465476046" containerID="c062455fc7b8ac59bdd8a3d3cbe4f31c3dab14cbc0e10712ff9eae9ea5248808" exitCode=0 Nov 28 10:01:38 crc kubenswrapper[4838]: I1128 10:01:38.796189 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bcnzk" event={"ID":"e35b5813-d7b9-4cbc-b002-44d465476046","Type":"ContainerDied","Data":"c062455fc7b8ac59bdd8a3d3cbe4f31c3dab14cbc0e10712ff9eae9ea5248808"} Nov 28 10:01:38 crc kubenswrapper[4838]: I1128 10:01:38.802529 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zlxs4" event={"ID":"d14771c4-48e3-4efe-a5f2-31331a30979f","Type":"ContainerStarted","Data":"fec6538109d1a8179f976da0cc7c2718e61b255d0e758466f4e3c69baab2eacb"} Nov 28 10:01:39 crc kubenswrapper[4838]: I1128 10:01:39.812355 4838 generic.go:334] "Generic (PLEG): container finished" podID="d14771c4-48e3-4efe-a5f2-31331a30979f" containerID="fec6538109d1a8179f976da0cc7c2718e61b255d0e758466f4e3c69baab2eacb" exitCode=0 Nov 28 10:01:39 crc kubenswrapper[4838]: I1128 10:01:39.812466 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zlxs4" event={"ID":"d14771c4-48e3-4efe-a5f2-31331a30979f","Type":"ContainerDied","Data":"fec6538109d1a8179f976da0cc7c2718e61b255d0e758466f4e3c69baab2eacb"} Nov 28 10:01:39 crc kubenswrapper[4838]: I1128 10:01:39.815324 4838 generic.go:334] "Generic (PLEG): container finished" podID="e1f1e017-546a-4f0b-965e-bd050ad48e44" containerID="9aef0bb53412b66d5aaff4ca64cd815054b8c645bca80ad62eb81ee6052c9736" exitCode=0 Nov 28 10:01:39 crc kubenswrapper[4838]: I1128 10:01:39.815391 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sdvg7" event={"ID":"e1f1e017-546a-4f0b-965e-bd050ad48e44","Type":"ContainerDied","Data":"9aef0bb53412b66d5aaff4ca64cd815054b8c645bca80ad62eb81ee6052c9736"} Nov 28 10:01:42 crc kubenswrapper[4838]: I1128 10:01:42.293780 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vmqxd" Nov 28 10:01:42 crc kubenswrapper[4838]: I1128 10:01:42.294157 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-marketplace-vmqxd" Nov 28 10:01:42 crc kubenswrapper[4838]: I1128 10:01:42.613322 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vmqxd" Nov 28 10:01:42 crc kubenswrapper[4838]: I1128 10:01:42.701131 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-d2lkr" Nov 28 10:01:42 crc kubenswrapper[4838]: I1128 10:01:42.701212 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-d2lkr" Nov 28 10:01:42 crc kubenswrapper[4838]: I1128 10:01:42.772923 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-d2lkr" Nov 28 10:01:42 crc kubenswrapper[4838]: I1128 10:01:42.902703 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vmqxd" Nov 28 10:01:42 crc kubenswrapper[4838]: I1128 10:01:42.903060 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-d2lkr" Nov 28 10:01:46 crc kubenswrapper[4838]: I1128 10:01:45.797680 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d2lkr"] Nov 28 10:01:46 crc kubenswrapper[4838]: I1128 10:01:45.798448 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-d2lkr" podUID="0ee3d402-f8eb-4319-aacc-f5c15fd7dc49" containerName="registry-server" containerID="cri-o://0f83505fadacf0ae5cb7ebe921232bb27c966534070344446f9b5998ba127f60" gracePeriod=2 Nov 28 10:01:47 crc kubenswrapper[4838]: I1128 10:01:47.875665 4838 generic.go:334] "Generic (PLEG): container finished" podID="0ee3d402-f8eb-4319-aacc-f5c15fd7dc49" containerID="0f83505fadacf0ae5cb7ebe921232bb27c966534070344446f9b5998ba127f60" exitCode=0 Nov 28 10:01:47 crc kubenswrapper[4838]: I1128 10:01:47.875751 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2lkr" event={"ID":"0ee3d402-f8eb-4319-aacc-f5c15fd7dc49","Type":"ContainerDied","Data":"0f83505fadacf0ae5cb7ebe921232bb27c966534070344446f9b5998ba127f60"} Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.335517 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d2lkr" Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.362995 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ee3d402-f8eb-4319-aacc-f5c15fd7dc49-catalog-content\") pod \"0ee3d402-f8eb-4319-aacc-f5c15fd7dc49\" (UID: \"0ee3d402-f8eb-4319-aacc-f5c15fd7dc49\") " Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.363145 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ee3d402-f8eb-4319-aacc-f5c15fd7dc49-utilities\") pod \"0ee3d402-f8eb-4319-aacc-f5c15fd7dc49\" (UID: \"0ee3d402-f8eb-4319-aacc-f5c15fd7dc49\") " Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.363350 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sbcft\" (UniqueName: \"kubernetes.io/projected/0ee3d402-f8eb-4319-aacc-f5c15fd7dc49-kube-api-access-sbcft\") pod \"0ee3d402-f8eb-4319-aacc-f5c15fd7dc49\" (UID: \"0ee3d402-f8eb-4319-aacc-f5c15fd7dc49\") " Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.371033 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0ee3d402-f8eb-4319-aacc-f5c15fd7dc49-utilities" (OuterVolumeSpecName: "utilities") pod "0ee3d402-f8eb-4319-aacc-f5c15fd7dc49" (UID: "0ee3d402-f8eb-4319-aacc-f5c15fd7dc49"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.373932 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ee3d402-f8eb-4319-aacc-f5c15fd7dc49-kube-api-access-sbcft" (OuterVolumeSpecName: "kube-api-access-sbcft") pod "0ee3d402-f8eb-4319-aacc-f5c15fd7dc49" (UID: "0ee3d402-f8eb-4319-aacc-f5c15fd7dc49"). InnerVolumeSpecName "kube-api-access-sbcft". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.401515 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0ee3d402-f8eb-4319-aacc-f5c15fd7dc49-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0ee3d402-f8eb-4319-aacc-f5c15fd7dc49" (UID: "0ee3d402-f8eb-4319-aacc-f5c15fd7dc49"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.465254 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ee3d402-f8eb-4319-aacc-f5c15fd7dc49-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.465314 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ee3d402-f8eb-4319-aacc-f5c15fd7dc49-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.465325 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sbcft\" (UniqueName: \"kubernetes.io/projected/0ee3d402-f8eb-4319-aacc-f5c15fd7dc49-kube-api-access-sbcft\") on node \"crc\" DevicePath \"\"" Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.896132 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gkhp" event={"ID":"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0","Type":"ContainerStarted","Data":"c7f193b4ec4568793495fffecdd0e3503720c269ac15d45d3716d5b6853b6d50"} Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.898729 4838 generic.go:334] "Generic (PLEG): container finished" podID="ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" containerID="03469c72ac0991ae157861743220c2f5dc230717fd21348bba20c6d870f9c815" exitCode=0 Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.898782 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mcgzz" event={"ID":"ac5ded1f-10ca-4db6-b3a6-80f30f28cb34","Type":"ContainerDied","Data":"03469c72ac0991ae157861743220c2f5dc230717fd21348bba20c6d870f9c815"} Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.902698 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2lkr" event={"ID":"0ee3d402-f8eb-4319-aacc-f5c15fd7dc49","Type":"ContainerDied","Data":"c18f6466dbcd92d29a9ccfeeaee982912a8d889d4580c1bbdd34a5f200ab62f8"} Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.902713 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d2lkr" Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.902767 4838 scope.go:117] "RemoveContainer" containerID="0f83505fadacf0ae5cb7ebe921232bb27c966534070344446f9b5998ba127f60" Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.907677 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zlxs4" event={"ID":"d14771c4-48e3-4efe-a5f2-31331a30979f","Type":"ContainerStarted","Data":"4ed370e04b7437cc2fc8e19ee2fd3cde7591bdb6b2a77b3cfb9192180d951d6c"} Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.915312 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n5fj2" event={"ID":"d8b6f770-e994-4bf8-92de-7e359cbe75a8","Type":"ContainerStarted","Data":"2a444bedf0b76e2433f75ae44b5203b000e460cdc1fbd076fe1009a86151e3a4"} Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.928568 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sdvg7" event={"ID":"e1f1e017-546a-4f0b-965e-bd050ad48e44","Type":"ContainerStarted","Data":"9130cd5998cc69dcf460ea6a726b4f0a2dbe0ba992f14901e0bec996820573e9"} Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.939656 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bcnzk" event={"ID":"e35b5813-d7b9-4cbc-b002-44d465476046","Type":"ContainerStarted","Data":"36446f37d5c4cfd29f4db891c95a00b3319ba8e54c2136cec7bffc9c2e3f6555"} Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.942664 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6gkhp" podStartSLOduration=11.050272853 podStartE2EDuration="1m40.942647255s" podCreationTimestamp="2025-11-28 10:00:09 +0000 UTC" firstStartedPulling="2025-11-28 10:00:19.184084419 +0000 UTC m=+190.883058589" lastFinishedPulling="2025-11-28 10:01:49.076458821 +0000 UTC m=+280.775432991" observedRunningTime="2025-11-28 10:01:49.927912041 +0000 UTC m=+281.626886231" watchObservedRunningTime="2025-11-28 10:01:49.942647255 +0000 UTC m=+281.641621425" Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.944033 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zlxs4" podStartSLOduration=5.486526352 podStartE2EDuration="1m39.94401307s" podCreationTimestamp="2025-11-28 10:00:10 +0000 UTC" firstStartedPulling="2025-11-28 10:00:15.089501397 +0000 UTC m=+186.788475567" lastFinishedPulling="2025-11-28 10:01:49.546988115 +0000 UTC m=+281.245962285" observedRunningTime="2025-11-28 10:01:49.941559109 +0000 UTC m=+281.640533289" watchObservedRunningTime="2025-11-28 10:01:49.94401307 +0000 UTC m=+281.642987240" Nov 28 10:01:49 crc kubenswrapper[4838]: I1128 10:01:49.951700 4838 scope.go:117] "RemoveContainer" containerID="c29ae14836ef9149b36e9a72b46e41e75cd2687fdf0c1e9aade020df506564b5" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.003455 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bcnzk" podStartSLOduration=9.617391601 podStartE2EDuration="1m40.0034378s" podCreationTimestamp="2025-11-28 10:00:10 +0000 UTC" firstStartedPulling="2025-11-28 10:00:19.189749409 +0000 UTC m=+190.888723579" lastFinishedPulling="2025-11-28 10:01:49.575795608 +0000 UTC m=+281.274769778" observedRunningTime="2025-11-28 10:01:49.999337379 +0000 UTC m=+281.698311549" 
watchObservedRunningTime="2025-11-28 10:01:50.0034378 +0000 UTC m=+281.702411970" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.013262 4838 scope.go:117] "RemoveContainer" containerID="60e3743cadc69a9e8179b5f5c13e5c3d2d0c9906af45a4ee648713a7ba854bbf" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.013291 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d2lkr"] Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.019659 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-d2lkr"] Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.040156 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-sdvg7" podStartSLOduration=10.750522873 podStartE2EDuration="1m41.040141488s" podCreationTimestamp="2025-11-28 10:00:09 +0000 UTC" firstStartedPulling="2025-11-28 10:00:19.189543231 +0000 UTC m=+190.888517401" lastFinishedPulling="2025-11-28 10:01:49.479161846 +0000 UTC m=+281.178136016" observedRunningTime="2025-11-28 10:01:50.039066042 +0000 UTC m=+281.738040212" watchObservedRunningTime="2025-11-28 10:01:50.040141488 +0000 UTC m=+281.739115648" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.079818 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-sdvg7" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.080156 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-sdvg7" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.152457 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" podUID="8d6cc687-8b13-44b1-a15b-488c17e8b50c" containerName="oauth-openshift" containerID="cri-o://05f94b1d5efe9901e6ee05d54e5dac334982ec559032fb38f6482fccdd938936" gracePeriod=15 Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.289423 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6gkhp" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.289492 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6gkhp" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.479393 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zlxs4" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.479455 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zlxs4" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.543743 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.573054 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ee3d402-f8eb-4319-aacc-f5c15fd7dc49" path="/var/lib/kubelet/pods/0ee3d402-f8eb-4319-aacc-f5c15fd7dc49/volumes" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.582558 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-template-error\") pod \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.582599 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-session\") pod \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.582621 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8d6cc687-8b13-44b1-a15b-488c17e8b50c-audit-dir\") pod \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.582645 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-trusted-ca-bundle\") pod \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.582688 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-ocp-branding-template\") pod \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.582748 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8d6cc687-8b13-44b1-a15b-488c17e8b50c-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "8d6cc687-8b13-44b1-a15b-488c17e8b50c" (UID: "8d6cc687-8b13-44b1-a15b-488c17e8b50c"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.583596 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "8d6cc687-8b13-44b1-a15b-488c17e8b50c" (UID: "8d6cc687-8b13-44b1-a15b-488c17e8b50c"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.582740 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-router-certs\") pod \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.583690 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-template-provider-selection\") pod \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.583758 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-cliconfig\") pod \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.583794 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-idp-0-file-data\") pod \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.583819 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-service-ca\") pod \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.583842 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-audit-policies\") pod \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.583873 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-serving-cert\") pod \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.583898 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-template-login\") pod \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.583930 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9llj4\" (UniqueName: \"kubernetes.io/projected/8d6cc687-8b13-44b1-a15b-488c17e8b50c-kube-api-access-9llj4\") pod \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\" (UID: \"8d6cc687-8b13-44b1-a15b-488c17e8b50c\") " Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 
10:01:50.584262 4838 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8d6cc687-8b13-44b1-a15b-488c17e8b50c-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.584287 4838 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.584283 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "8d6cc687-8b13-44b1-a15b-488c17e8b50c" (UID: "8d6cc687-8b13-44b1-a15b-488c17e8b50c"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.584795 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "8d6cc687-8b13-44b1-a15b-488c17e8b50c" (UID: "8d6cc687-8b13-44b1-a15b-488c17e8b50c"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.585260 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "8d6cc687-8b13-44b1-a15b-488c17e8b50c" (UID: "8d6cc687-8b13-44b1-a15b-488c17e8b50c"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.587730 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "8d6cc687-8b13-44b1-a15b-488c17e8b50c" (UID: "8d6cc687-8b13-44b1-a15b-488c17e8b50c"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.588036 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "8d6cc687-8b13-44b1-a15b-488c17e8b50c" (UID: "8d6cc687-8b13-44b1-a15b-488c17e8b50c"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.588162 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "8d6cc687-8b13-44b1-a15b-488c17e8b50c" (UID: "8d6cc687-8b13-44b1-a15b-488c17e8b50c"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.589733 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d6cc687-8b13-44b1-a15b-488c17e8b50c-kube-api-access-9llj4" (OuterVolumeSpecName: "kube-api-access-9llj4") pod "8d6cc687-8b13-44b1-a15b-488c17e8b50c" (UID: "8d6cc687-8b13-44b1-a15b-488c17e8b50c"). InnerVolumeSpecName "kube-api-access-9llj4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.597035 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "8d6cc687-8b13-44b1-a15b-488c17e8b50c" (UID: "8d6cc687-8b13-44b1-a15b-488c17e8b50c"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.597342 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "8d6cc687-8b13-44b1-a15b-488c17e8b50c" (UID: "8d6cc687-8b13-44b1-a15b-488c17e8b50c"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.599617 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "8d6cc687-8b13-44b1-a15b-488c17e8b50c" (UID: "8d6cc687-8b13-44b1-a15b-488c17e8b50c"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.599885 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "8d6cc687-8b13-44b1-a15b-488c17e8b50c" (UID: "8d6cc687-8b13-44b1-a15b-488c17e8b50c"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.600067 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "8d6cc687-8b13-44b1-a15b-488c17e8b50c" (UID: "8d6cc687-8b13-44b1-a15b-488c17e8b50c"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.685949 4838 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.685978 4838 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.685988 4838 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.685998 4838 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8d6cc687-8b13-44b1-a15b-488c17e8b50c-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.686008 4838 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.686018 4838 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.686029 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9llj4\" (UniqueName: \"kubernetes.io/projected/8d6cc687-8b13-44b1-a15b-488c17e8b50c-kube-api-access-9llj4\") on node \"crc\" DevicePath \"\"" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.686038 4838 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.686046 4838 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.686055 4838 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.686064 4838 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.686077 4838 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/8d6cc687-8b13-44b1-a15b-488c17e8b50c-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.714873 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bcnzk" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.714971 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bcnzk" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.967950 4838 generic.go:334] "Generic (PLEG): container finished" podID="8d6cc687-8b13-44b1-a15b-488c17e8b50c" containerID="05f94b1d5efe9901e6ee05d54e5dac334982ec559032fb38f6482fccdd938936" exitCode=0 Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.968033 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" event={"ID":"8d6cc687-8b13-44b1-a15b-488c17e8b50c","Type":"ContainerDied","Data":"05f94b1d5efe9901e6ee05d54e5dac334982ec559032fb38f6482fccdd938936"} Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.968368 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" event={"ID":"8d6cc687-8b13-44b1-a15b-488c17e8b50c","Type":"ContainerDied","Data":"5f3ece3652b669403065c2b4a84281cd2e781b0954be5e4d96273be56c182770"} Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.968391 4838 scope.go:117] "RemoveContainer" containerID="05f94b1d5efe9901e6ee05d54e5dac334982ec559032fb38f6482fccdd938936" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.968043 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-df5f6" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.971314 4838 generic.go:334] "Generic (PLEG): container finished" podID="d8b6f770-e994-4bf8-92de-7e359cbe75a8" containerID="2a444bedf0b76e2433f75ae44b5203b000e460cdc1fbd076fe1009a86151e3a4" exitCode=0 Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.971370 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n5fj2" event={"ID":"d8b6f770-e994-4bf8-92de-7e359cbe75a8","Type":"ContainerDied","Data":"2a444bedf0b76e2433f75ae44b5203b000e460cdc1fbd076fe1009a86151e3a4"} Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.980805 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mcgzz" event={"ID":"ac5ded1f-10ca-4db6-b3a6-80f30f28cb34","Type":"ContainerStarted","Data":"0c552617179b3cea76f22cfdaf8d852af57ced4aabea154b74088bf2fe6615b0"} Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.986278 4838 scope.go:117] "RemoveContainer" containerID="05f94b1d5efe9901e6ee05d54e5dac334982ec559032fb38f6482fccdd938936" Nov 28 10:01:50 crc kubenswrapper[4838]: E1128 10:01:50.988526 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05f94b1d5efe9901e6ee05d54e5dac334982ec559032fb38f6482fccdd938936\": container with ID starting with 05f94b1d5efe9901e6ee05d54e5dac334982ec559032fb38f6482fccdd938936 not found: ID does not exist" containerID="05f94b1d5efe9901e6ee05d54e5dac334982ec559032fb38f6482fccdd938936" Nov 28 10:01:50 crc kubenswrapper[4838]: I1128 10:01:50.988560 4838 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"05f94b1d5efe9901e6ee05d54e5dac334982ec559032fb38f6482fccdd938936"} err="failed to get container status \"05f94b1d5efe9901e6ee05d54e5dac334982ec559032fb38f6482fccdd938936\": rpc error: code = NotFound desc = could not find container \"05f94b1d5efe9901e6ee05d54e5dac334982ec559032fb38f6482fccdd938936\": container with ID starting with 05f94b1d5efe9901e6ee05d54e5dac334982ec559032fb38f6482fccdd938936 not found: ID does not exist" Nov 28 10:01:51 crc kubenswrapper[4838]: I1128 10:01:51.014439 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mcgzz" podStartSLOduration=7.467536449 podStartE2EDuration="1m39.014415408s" podCreationTimestamp="2025-11-28 10:00:12 +0000 UTC" firstStartedPulling="2025-11-28 10:00:19.161074635 +0000 UTC m=+190.860048805" lastFinishedPulling="2025-11-28 10:01:50.707953594 +0000 UTC m=+282.406927764" observedRunningTime="2025-11-28 10:01:51.013349282 +0000 UTC m=+282.712323452" watchObservedRunningTime="2025-11-28 10:01:51.014415408 +0000 UTC m=+282.713389578" Nov 28 10:01:51 crc kubenswrapper[4838]: I1128 10:01:51.031453 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-df5f6"] Nov 28 10:01:51 crc kubenswrapper[4838]: I1128 10:01:51.038147 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-df5f6"] Nov 28 10:01:51 crc kubenswrapper[4838]: I1128 10:01:51.118986 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-sdvg7" podUID="e1f1e017-546a-4f0b-965e-bd050ad48e44" containerName="registry-server" probeResult="failure" output=< Nov 28 10:01:51 crc kubenswrapper[4838]: timeout: failed to connect service ":50051" within 1s Nov 28 10:01:51 crc kubenswrapper[4838]: > Nov 28 10:01:51 crc kubenswrapper[4838]: I1128 10:01:51.330944 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-6gkhp" podUID="0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0" containerName="registry-server" probeResult="failure" output=< Nov 28 10:01:51 crc kubenswrapper[4838]: timeout: failed to connect service ":50051" within 1s Nov 28 10:01:51 crc kubenswrapper[4838]: > Nov 28 10:01:51 crc kubenswrapper[4838]: I1128 10:01:51.523407 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-zlxs4" podUID="d14771c4-48e3-4efe-a5f2-31331a30979f" containerName="registry-server" probeResult="failure" output=< Nov 28 10:01:51 crc kubenswrapper[4838]: timeout: failed to connect service ":50051" within 1s Nov 28 10:01:51 crc kubenswrapper[4838]: > Nov 28 10:01:51 crc kubenswrapper[4838]: I1128 10:01:51.753037 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-bcnzk" podUID="e35b5813-d7b9-4cbc-b002-44d465476046" containerName="registry-server" probeResult="failure" output=< Nov 28 10:01:51 crc kubenswrapper[4838]: timeout: failed to connect service ":50051" within 1s Nov 28 10:01:51 crc kubenswrapper[4838]: > Nov 28 10:01:52 crc kubenswrapper[4838]: I1128 10:01:52.570384 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d6cc687-8b13-44b1-a15b-488c17e8b50c" path="/var/lib/kubelet/pods/8d6cc687-8b13-44b1-a15b-488c17e8b50c/volumes" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:52.995390 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n5fj2" 
event={"ID":"d8b6f770-e994-4bf8-92de-7e359cbe75a8","Type":"ContainerStarted","Data":"5e6b8d3df090431f382ebb130cd462a38463f6742c3215993268d50c4fe52e50"} Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.014504 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-n5fj2" podStartSLOduration=8.353892301 podStartE2EDuration="1m40.014482872s" podCreationTimestamp="2025-11-28 10:00:13 +0000 UTC" firstStartedPulling="2025-11-28 10:00:20.281018961 +0000 UTC m=+191.979993131" lastFinishedPulling="2025-11-28 10:01:51.941609532 +0000 UTC m=+283.640583702" observedRunningTime="2025-11-28 10:01:53.010788071 +0000 UTC m=+284.709762251" watchObservedRunningTime="2025-11-28 10:01:53.014482872 +0000 UTC m=+284.713457052" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.293393 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mcgzz" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.293431 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mcgzz" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.978312 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-6bffc96f45-v5ctp"] Nov 28 10:01:54 crc kubenswrapper[4838]: E1128 10:01:53.978632 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ee3d402-f8eb-4319-aacc-f5c15fd7dc49" containerName="extract-utilities" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.978645 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ee3d402-f8eb-4319-aacc-f5c15fd7dc49" containerName="extract-utilities" Nov 28 10:01:54 crc kubenswrapper[4838]: E1128 10:01:53.978663 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ee3d402-f8eb-4319-aacc-f5c15fd7dc49" containerName="extract-content" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.978669 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ee3d402-f8eb-4319-aacc-f5c15fd7dc49" containerName="extract-content" Nov 28 10:01:54 crc kubenswrapper[4838]: E1128 10:01:53.978684 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66a94c0f-fb8e-4a92-bb75-5966a3b361c7" containerName="pruner" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.978690 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="66a94c0f-fb8e-4a92-bb75-5966a3b361c7" containerName="pruner" Nov 28 10:01:54 crc kubenswrapper[4838]: E1128 10:01:53.978699 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d6cc687-8b13-44b1-a15b-488c17e8b50c" containerName="oauth-openshift" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.978706 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d6cc687-8b13-44b1-a15b-488c17e8b50c" containerName="oauth-openshift" Nov 28 10:01:54 crc kubenswrapper[4838]: E1128 10:01:53.978730 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ee3d402-f8eb-4319-aacc-f5c15fd7dc49" containerName="registry-server" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.978737 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ee3d402-f8eb-4319-aacc-f5c15fd7dc49" containerName="registry-server" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.978849 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="66a94c0f-fb8e-4a92-bb75-5966a3b361c7" containerName="pruner" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 
10:01:53.978867 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ee3d402-f8eb-4319-aacc-f5c15fd7dc49" containerName="registry-server" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.978878 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d6cc687-8b13-44b1-a15b-488c17e8b50c" containerName="oauth-openshift" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.979334 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.981956 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.982179 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.982748 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.982752 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.982980 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.983013 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.983262 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.983522 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.984188 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.987686 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.990232 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.991166 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.992143 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-6bffc96f45-v5ctp"] Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:53.994104 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.001137 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.002863 4838 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.030704 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-system-router-certs\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.030786 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.030839 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-user-template-error\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.030861 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.030904 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/be6231bb-32fa-4ee1-b294-ff3d784bef88-audit-policies\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.030997 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-system-session\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.031048 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.031096 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/be6231bb-32fa-4ee1-b294-ff3d784bef88-audit-dir\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.031116 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-user-template-login\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.031133 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.031171 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.031198 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.031268 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g97v8\" (UniqueName: \"kubernetes.io/projected/be6231bb-32fa-4ee1-b294-ff3d784bef88-kube-api-access-g97v8\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.031292 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-system-service-ca\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.132406 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.132451 4838 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.132473 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g97v8\" (UniqueName: \"kubernetes.io/projected/be6231bb-32fa-4ee1-b294-ff3d784bef88-kube-api-access-g97v8\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.132490 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-system-service-ca\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.132518 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-system-router-certs\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.132537 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.132585 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-user-template-error\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.132612 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.132639 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/be6231bb-32fa-4ee1-b294-ff3d784bef88-audit-policies\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.132664 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-system-session\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.132691 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.132729 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/be6231bb-32fa-4ee1-b294-ff3d784bef88-audit-dir\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.132751 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-user-template-login\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.132773 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.133699 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.133807 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/be6231bb-32fa-4ee1-b294-ff3d784bef88-audit-dir\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.134689 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/be6231bb-32fa-4ee1-b294-ff3d784bef88-audit-policies\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.134690 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.133738 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-system-service-ca\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.139265 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-system-session\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.140268 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.139964 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.140219 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.140600 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.144066 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-system-router-certs\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.154060 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: 
\"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-user-template-error\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.157205 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g97v8\" (UniqueName: \"kubernetes.io/projected/be6231bb-32fa-4ee1-b294-ff3d784bef88-kube-api-access-g97v8\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.158156 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/be6231bb-32fa-4ee1-b294-ff3d784bef88-v4-0-config-user-template-login\") pod \"oauth-openshift-6bffc96f45-v5ctp\" (UID: \"be6231bb-32fa-4ee1-b294-ff3d784bef88\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.231366 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-n5fj2" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.231406 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-n5fj2" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.317387 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.348446 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mcgzz" podUID="ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" containerName="registry-server" probeResult="failure" output=< Nov 28 10:01:54 crc kubenswrapper[4838]: timeout: failed to connect service ":50051" within 1s Nov 28 10:01:54 crc kubenswrapper[4838]: > Nov 28 10:01:54 crc kubenswrapper[4838]: I1128 10:01:54.574440 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-6bffc96f45-v5ctp"] Nov 28 10:01:54 crc kubenswrapper[4838]: W1128 10:01:54.581609 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbe6231bb_32fa_4ee1_b294_ff3d784bef88.slice/crio-eb43fbc2a8dd3e53a2abc92985ebe4f0461381e318ec0d0e4648a5832481b399 WatchSource:0}: Error finding container eb43fbc2a8dd3e53a2abc92985ebe4f0461381e318ec0d0e4648a5832481b399: Status 404 returned error can't find the container with id eb43fbc2a8dd3e53a2abc92985ebe4f0461381e318ec0d0e4648a5832481b399 Nov 28 10:01:55 crc kubenswrapper[4838]: I1128 10:01:55.036458 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" event={"ID":"be6231bb-32fa-4ee1-b294-ff3d784bef88","Type":"ContainerStarted","Data":"eb43fbc2a8dd3e53a2abc92985ebe4f0461381e318ec0d0e4648a5832481b399"} Nov 28 10:01:55 crc kubenswrapper[4838]: I1128 10:01:55.283977 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-n5fj2" podUID="d8b6f770-e994-4bf8-92de-7e359cbe75a8" containerName="registry-server" probeResult="failure" output=< Nov 28 10:01:55 crc kubenswrapper[4838]: timeout: failed to connect service ":50051" within 1s Nov 28 10:01:55 crc 
kubenswrapper[4838]: > Nov 28 10:01:56 crc kubenswrapper[4838]: I1128 10:01:56.041929 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" event={"ID":"be6231bb-32fa-4ee1-b294-ff3d784bef88","Type":"ContainerStarted","Data":"e19a49a8865a9ba5a296de62be24ab96557d40e18a29c3c715f303f7f2313ef2"} Nov 28 10:01:57 crc kubenswrapper[4838]: I1128 10:01:57.049480 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:57 crc kubenswrapper[4838]: I1128 10:01:57.057238 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" Nov 28 10:01:57 crc kubenswrapper[4838]: I1128 10:01:57.077915 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-6bffc96f45-v5ctp" podStartSLOduration=32.077891946 podStartE2EDuration="32.077891946s" podCreationTimestamp="2025-11-28 10:01:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:01:57.07402573 +0000 UTC m=+288.772999920" watchObservedRunningTime="2025-11-28 10:01:57.077891946 +0000 UTC m=+288.776866146" Nov 28 10:02:00 crc kubenswrapper[4838]: I1128 10:02:00.163269 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-sdvg7" Nov 28 10:02:00 crc kubenswrapper[4838]: I1128 10:02:00.224032 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-sdvg7" Nov 28 10:02:00 crc kubenswrapper[4838]: I1128 10:02:00.344805 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6gkhp" Nov 28 10:02:00 crc kubenswrapper[4838]: I1128 10:02:00.404277 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6gkhp" Nov 28 10:02:00 crc kubenswrapper[4838]: I1128 10:02:00.515177 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zlxs4" Nov 28 10:02:00 crc kubenswrapper[4838]: I1128 10:02:00.550668 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zlxs4" Nov 28 10:02:00 crc kubenswrapper[4838]: I1128 10:02:00.781138 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bcnzk" Nov 28 10:02:00 crc kubenswrapper[4838]: I1128 10:02:00.855511 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bcnzk" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.217545 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bcnzk"] Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.217968 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bcnzk" podUID="e35b5813-d7b9-4cbc-b002-44d465476046" containerName="registry-server" containerID="cri-o://36446f37d5c4cfd29f4db891c95a00b3319ba8e54c2136cec7bffc9c2e3f6555" gracePeriod=2 Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.332870 4838 kubelet.go:2431] "SyncLoop REMOVE" source="file" 
pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.333234 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555" gracePeriod=15 Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.333302 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9" gracePeriod=15 Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.333377 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b" gracePeriod=15 Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.333435 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b" gracePeriod=15 Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.333480 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b" gracePeriod=15 Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.334365 4838 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 10:02:02 crc kubenswrapper[4838]: E1128 10:02:02.334674 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.334698 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 28 10:02:02 crc kubenswrapper[4838]: E1128 10:02:02.334742 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.334755 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 28 10:02:02 crc kubenswrapper[4838]: E1128 10:02:02.334769 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.334779 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 10:02:02 crc kubenswrapper[4838]: E1128 10:02:02.334793 4838 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.334804 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 10:02:02 crc kubenswrapper[4838]: E1128 10:02:02.334818 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.334828 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 10:02:02 crc kubenswrapper[4838]: E1128 10:02:02.334843 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.334853 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 28 10:02:02 crc kubenswrapper[4838]: E1128 10:02:02.334865 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.334875 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 10:02:02 crc kubenswrapper[4838]: E1128 10:02:02.334886 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.334896 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 10:02:02 crc kubenswrapper[4838]: E1128 10:02:02.334915 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.334926 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.335085 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.335103 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.335118 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.335131 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.335142 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.335162 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 28 
10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.335175 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.335499 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.341844 4838 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.343490 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.349317 4838 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.393630 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.447121 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.447174 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.447211 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.447238 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.447259 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.447285 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod 
\"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.447342 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.447367 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.548806 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.548881 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.548932 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.548973 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.548967 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.549003 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.549043 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod 
\"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.549091 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.549126 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.549162 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.549203 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.549210 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.549102 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.549393 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.549416 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.549515 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.684939 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 10:02:02 crc kubenswrapper[4838]: W1128 10:02:02.712973 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-27c4b861c1951a7142b7675e491faf3c861be5a73c3f4e103eeff0bfe479e2e0 WatchSource:0}: Error finding container 27c4b861c1951a7142b7675e491faf3c861be5a73c3f4e103eeff0bfe479e2e0: Status 404 returned error can't find the container with id 27c4b861c1951a7142b7675e491faf3c861be5a73c3f4e103eeff0bfe479e2e0 Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.814210 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zlxs4"] Nov 28 10:02:02 crc kubenswrapper[4838]: I1128 10:02:02.815431 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zlxs4" podUID="d14771c4-48e3-4efe-a5f2-31331a30979f" containerName="registry-server" containerID="cri-o://4ed370e04b7437cc2fc8e19ee2fd3cde7591bdb6b2a77b3cfb9192180d951d6c" gracePeriod=2 Nov 28 10:02:03 crc kubenswrapper[4838]: I1128 10:02:03.092960 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"27c4b861c1951a7142b7675e491faf3c861be5a73c3f4e103eeff0bfe479e2e0"} Nov 28 10:02:03 crc kubenswrapper[4838]: I1128 10:02:03.344548 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mcgzz" Nov 28 10:02:03 crc kubenswrapper[4838]: I1128 10:02:03.382559 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mcgzz" Nov 28 10:02:04 crc kubenswrapper[4838]: I1128 10:02:04.101193 4838 generic.go:334] "Generic (PLEG): container finished" podID="d14771c4-48e3-4efe-a5f2-31331a30979f" containerID="4ed370e04b7437cc2fc8e19ee2fd3cde7591bdb6b2a77b3cfb9192180d951d6c" exitCode=0 Nov 28 10:02:04 crc kubenswrapper[4838]: I1128 10:02:04.101589 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zlxs4" event={"ID":"d14771c4-48e3-4efe-a5f2-31331a30979f","Type":"ContainerDied","Data":"4ed370e04b7437cc2fc8e19ee2fd3cde7591bdb6b2a77b3cfb9192180d951d6c"} Nov 28 10:02:04 crc kubenswrapper[4838]: I1128 10:02:04.103879 4838 generic.go:334] "Generic (PLEG): container finished" podID="e35b5813-d7b9-4cbc-b002-44d465476046" containerID="36446f37d5c4cfd29f4db891c95a00b3319ba8e54c2136cec7bffc9c2e3f6555" exitCode=0 Nov 28 10:02:04 crc kubenswrapper[4838]: I1128 10:02:04.103957 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bcnzk" event={"ID":"e35b5813-d7b9-4cbc-b002-44d465476046","Type":"ContainerDied","Data":"36446f37d5c4cfd29f4db891c95a00b3319ba8e54c2136cec7bffc9c2e3f6555"} Nov 28 10:02:04 crc kubenswrapper[4838]: I1128 10:02:04.105228 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"63d56b65686e3856fc7c923cd1def7e61ab1cac00a4f33f0bfbbeddcedfa5997"} Nov 28 
10:02:04 crc kubenswrapper[4838]: I1128 10:02:04.106464 4838 generic.go:334] "Generic (PLEG): container finished" podID="37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b" containerID="fb1c4fc0af81b81e544452f3515780db57ee95ef051ddbca10f323baf3d05318" exitCode=0 Nov 28 10:02:04 crc kubenswrapper[4838]: I1128 10:02:04.106539 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b","Type":"ContainerDied","Data":"fb1c4fc0af81b81e544452f3515780db57ee95ef051ddbca10f323baf3d05318"} Nov 28 10:02:04 crc kubenswrapper[4838]: I1128 10:02:04.108792 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 28 10:02:04 crc kubenswrapper[4838]: I1128 10:02:04.110333 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 10:02:04 crc kubenswrapper[4838]: I1128 10:02:04.111091 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver/0.log" Nov 28 10:02:04 crc kubenswrapper[4838]: I1128 10:02:04.111471 4838 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9" exitCode=0 Nov 28 10:02:04 crc kubenswrapper[4838]: I1128 10:02:04.111506 4838 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b" exitCode=0 Nov 28 10:02:04 crc kubenswrapper[4838]: I1128 10:02:04.111522 4838 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b" exitCode=0 Nov 28 10:02:04 crc kubenswrapper[4838]: I1128 10:02:04.111537 4838 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555" exitCode=2 Nov 28 10:02:04 crc kubenswrapper[4838]: I1128 10:02:04.111955 4838 scope.go:117] "RemoveContainer" containerID="2ad3223a9346861cf1b27af8c95207349f10af6f416380747e32c4faf1d3add4" Nov 28 10:02:04 crc kubenswrapper[4838]: I1128 10:02:04.307886 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-n5fj2" Nov 28 10:02:04 crc kubenswrapper[4838]: I1128 10:02:04.383321 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-n5fj2" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.119200 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zlxs4" event={"ID":"d14771c4-48e3-4efe-a5f2-31331a30979f","Type":"ContainerDied","Data":"ae1a562ed874232481cd075629141113a8b15940346070ef252389c2165e3dc2"} Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.119518 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ae1a562ed874232481cd075629141113a8b15940346070ef252389c2165e3dc2" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.121104 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bcnzk" 
event={"ID":"e35b5813-d7b9-4cbc-b002-44d465476046","Type":"ContainerDied","Data":"c4dd42833b4b184ec1387bdd3e57b17f3cfb7cfffd87fdd66e51ba314b8e583e"} Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.121160 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c4dd42833b4b184ec1387bdd3e57b17f3cfb7cfffd87fdd66e51ba314b8e583e" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.123287 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.123783 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver/0.log" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.131862 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bcnzk" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.136779 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zlxs4" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.197995 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e35b5813-d7b9-4cbc-b002-44d465476046-utilities\") pod \"e35b5813-d7b9-4cbc-b002-44d465476046\" (UID: \"e35b5813-d7b9-4cbc-b002-44d465476046\") " Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.198047 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6g2v\" (UniqueName: \"kubernetes.io/projected/e35b5813-d7b9-4cbc-b002-44d465476046-kube-api-access-k6g2v\") pod \"e35b5813-d7b9-4cbc-b002-44d465476046\" (UID: \"e35b5813-d7b9-4cbc-b002-44d465476046\") " Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.198107 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d14771c4-48e3-4efe-a5f2-31331a30979f-utilities\") pod \"d14771c4-48e3-4efe-a5f2-31331a30979f\" (UID: \"d14771c4-48e3-4efe-a5f2-31331a30979f\") " Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.198137 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e35b5813-d7b9-4cbc-b002-44d465476046-catalog-content\") pod \"e35b5813-d7b9-4cbc-b002-44d465476046\" (UID: \"e35b5813-d7b9-4cbc-b002-44d465476046\") " Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.198196 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d14771c4-48e3-4efe-a5f2-31331a30979f-catalog-content\") pod \"d14771c4-48e3-4efe-a5f2-31331a30979f\" (UID: \"d14771c4-48e3-4efe-a5f2-31331a30979f\") " Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.198223 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vls5\" (UniqueName: \"kubernetes.io/projected/d14771c4-48e3-4efe-a5f2-31331a30979f-kube-api-access-4vls5\") pod \"d14771c4-48e3-4efe-a5f2-31331a30979f\" (UID: \"d14771c4-48e3-4efe-a5f2-31331a30979f\") " Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.199070 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/e35b5813-d7b9-4cbc-b002-44d465476046-utilities" (OuterVolumeSpecName: "utilities") pod "e35b5813-d7b9-4cbc-b002-44d465476046" (UID: "e35b5813-d7b9-4cbc-b002-44d465476046"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.199600 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d14771c4-48e3-4efe-a5f2-31331a30979f-utilities" (OuterVolumeSpecName: "utilities") pod "d14771c4-48e3-4efe-a5f2-31331a30979f" (UID: "d14771c4-48e3-4efe-a5f2-31331a30979f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.203323 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e35b5813-d7b9-4cbc-b002-44d465476046-kube-api-access-k6g2v" (OuterVolumeSpecName: "kube-api-access-k6g2v") pod "e35b5813-d7b9-4cbc-b002-44d465476046" (UID: "e35b5813-d7b9-4cbc-b002-44d465476046"). InnerVolumeSpecName "kube-api-access-k6g2v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.214526 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d14771c4-48e3-4efe-a5f2-31331a30979f-kube-api-access-4vls5" (OuterVolumeSpecName: "kube-api-access-4vls5") pod "d14771c4-48e3-4efe-a5f2-31331a30979f" (UID: "d14771c4-48e3-4efe-a5f2-31331a30979f"). InnerVolumeSpecName "kube-api-access-4vls5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.255960 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e35b5813-d7b9-4cbc-b002-44d465476046-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e35b5813-d7b9-4cbc-b002-44d465476046" (UID: "e35b5813-d7b9-4cbc-b002-44d465476046"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.261211 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d14771c4-48e3-4efe-a5f2-31331a30979f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d14771c4-48e3-4efe-a5f2-31331a30979f" (UID: "d14771c4-48e3-4efe-a5f2-31331a30979f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.299556 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d14771c4-48e3-4efe-a5f2-31331a30979f-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.299613 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e35b5813-d7b9-4cbc-b002-44d465476046-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.299633 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d14771c4-48e3-4efe-a5f2-31331a30979f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.299652 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vls5\" (UniqueName: \"kubernetes.io/projected/d14771c4-48e3-4efe-a5f2-31331a30979f-kube-api-access-4vls5\") on node \"crc\" DevicePath \"\"" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.299673 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e35b5813-d7b9-4cbc-b002-44d465476046-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.299690 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6g2v\" (UniqueName: \"kubernetes.io/projected/e35b5813-d7b9-4cbc-b002-44d465476046-kube-api-access-k6g2v\") on node \"crc\" DevicePath \"\"" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.300328 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.400583 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b-kube-api-access\") pod \"37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b\" (UID: \"37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b\") " Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.400660 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b-var-lock\") pod \"37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b\" (UID: \"37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b\") " Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.400750 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b-kubelet-dir\") pod \"37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b\" (UID: \"37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b\") " Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.400865 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b-var-lock" (OuterVolumeSpecName: "var-lock") pod "37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b" (UID: "37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.400926 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b" (UID: "37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.401282 4838 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.401307 4838 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b-var-lock\") on node \"crc\" DevicePath \"\"" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.405769 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b" (UID: "37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:02:05 crc kubenswrapper[4838]: I1128 10:02:05.501916 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 10:02:06 crc kubenswrapper[4838]: I1128 10:02:06.134026 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b","Type":"ContainerDied","Data":"e0491cbad3921626d21754a6f7088ef16bc902d4f7c58271742a5054a08cefbb"} Nov 28 10:02:06 crc kubenswrapper[4838]: I1128 10:02:06.134151 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e0491cbad3921626d21754a6f7088ef16bc902d4f7c58271742a5054a08cefbb" Nov 28 10:02:06 crc kubenswrapper[4838]: I1128 10:02:06.134102 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 10:02:06 crc kubenswrapper[4838]: I1128 10:02:06.134068 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bcnzk" Nov 28 10:02:06 crc kubenswrapper[4838]: I1128 10:02:06.134789 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zlxs4" Nov 28 10:02:08 crc kubenswrapper[4838]: I1128 10:02:08.346544 4838 status_manager.go:851] "Failed to get status for pod" podUID="ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" pod="openshift-marketplace/redhat-operators-mcgzz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-mcgzz\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:08 crc kubenswrapper[4838]: I1128 10:02:08.347654 4838 status_manager.go:851] "Failed to get status for pod" podUID="e35b5813-d7b9-4cbc-b002-44d465476046" pod="openshift-marketplace/community-operators-bcnzk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-bcnzk\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:08 crc kubenswrapper[4838]: I1128 10:02:08.348324 4838 status_manager.go:851] "Failed to get status for pod" podUID="d8b6f770-e994-4bf8-92de-7e359cbe75a8" pod="openshift-marketplace/redhat-operators-n5fj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n5fj2\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:08 crc kubenswrapper[4838]: I1128 10:02:08.348719 4838 status_manager.go:851] "Failed to get status for pod" podUID="ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" pod="openshift-marketplace/redhat-operators-mcgzz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-mcgzz\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:08 crc kubenswrapper[4838]: I1128 10:02:08.349085 4838 status_manager.go:851] "Failed to get status for pod" podUID="37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:08 crc kubenswrapper[4838]: I1128 10:02:08.349519 4838 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:08 crc kubenswrapper[4838]: I1128 10:02:08.349919 4838 status_manager.go:851] "Failed to get status for pod" podUID="d14771c4-48e3-4efe-a5f2-31331a30979f" pod="openshift-marketplace/certified-operators-zlxs4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zlxs4\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:08 crc kubenswrapper[4838]: E1128 10:02:08.363941 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T10:02:03Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T10:02:03Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T10:02:03Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T10:02:03Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:20434c856c20158a4c73986bf7de93188afa338ed356d293a59f9e621072cfc3\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:24f7dab5f4a6fcbb16d41b8a7345f9f9bae2ef1e2c53abed71c4f18eeafebc85\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1605131077},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:1ab7704f67839bb3705d0c80bea6f7197f233d472860c3005433c90d7786dd54\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:9c13035c7ccf9d13a21c9219d8d0d462fa2fdb4fe128d9724443784b1ed9a318\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1205801806},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:78954d1069ef1ec171f78e8a5fa73063f4792e602da43b4f790e6545397ddd70\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:d46edf2a1895805ff5ac284dfbb375cc08aeb2751f4c05e4c27a80d7c1e29ca1\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1195657558},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:42da3a01b99987f17824a70b0ac9cde8d27a0ea39d325b9b7216ebdc5ba1f406\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:8932ced4defd2733d4740ea31dd7a6050447207c72233491a6ffdb06926137e7\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1122761533},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\
\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:08 crc kubenswrapper[4838]: E1128 10:02:08.364422 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get 
\"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:08 crc kubenswrapper[4838]: E1128 10:02:08.364699 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:08 crc kubenswrapper[4838]: E1128 10:02:08.365024 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:08 crc kubenswrapper[4838]: E1128 10:02:08.365334 4838 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:08 crc kubenswrapper[4838]: E1128 10:02:08.365380 4838 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 10:02:08 crc kubenswrapper[4838]: E1128 10:02:08.465837 4838 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.65:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187c237147715396 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Created,Message:Created container startup-monitor,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 10:02:03.459277718 +0000 UTC m=+295.158251898,LastTimestamp:2025-11-28 10:02:03.459277718 +0000 UTC m=+295.158251898,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 28 10:02:08 crc kubenswrapper[4838]: I1128 10:02:08.566001 4838 status_manager.go:851] "Failed to get status for pod" podUID="d8b6f770-e994-4bf8-92de-7e359cbe75a8" pod="openshift-marketplace/redhat-operators-n5fj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n5fj2\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:08 crc kubenswrapper[4838]: I1128 10:02:08.566647 4838 status_manager.go:851] "Failed to get status for pod" podUID="ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" pod="openshift-marketplace/redhat-operators-mcgzz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-mcgzz\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:08 crc kubenswrapper[4838]: I1128 10:02:08.567280 4838 status_manager.go:851] "Failed to get status for pod" podUID="37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:08 crc kubenswrapper[4838]: I1128 10:02:08.567683 4838 status_manager.go:851] "Failed to get status for pod" 
podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:08 crc kubenswrapper[4838]: I1128 10:02:08.568119 4838 status_manager.go:851] "Failed to get status for pod" podUID="d14771c4-48e3-4efe-a5f2-31331a30979f" pod="openshift-marketplace/certified-operators-zlxs4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zlxs4\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:08 crc kubenswrapper[4838]: I1128 10:02:08.568473 4838 status_manager.go:851] "Failed to get status for pod" podUID="e35b5813-d7b9-4cbc-b002-44d465476046" pod="openshift-marketplace/community-operators-bcnzk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-bcnzk\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:10 crc kubenswrapper[4838]: E1128 10:02:10.611685 4838 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.102.83.65:6443: connect: connection refused" pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" volumeName="registry-storage" Nov 28 10:02:11 crc kubenswrapper[4838]: E1128 10:02:11.193370 4838 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-conmon-3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b.scope\": RecentStats: unable to find data in memory cache]" Nov 28 10:02:11 crc kubenswrapper[4838]: I1128 10:02:11.391153 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 10:02:11 crc kubenswrapper[4838]: I1128 10:02:11.392507 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver/0.log" Nov 28 10:02:11 crc kubenswrapper[4838]: I1128 10:02:11.394030 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 10:02:11 crc kubenswrapper[4838]: I1128 10:02:11.394528 4838 status_manager.go:851] "Failed to get status for pod" podUID="d8b6f770-e994-4bf8-92de-7e359cbe75a8" pod="openshift-marketplace/redhat-operators-n5fj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n5fj2\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:11 crc kubenswrapper[4838]: I1128 10:02:11.394995 4838 status_manager.go:851] "Failed to get status for pod" podUID="ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" pod="openshift-marketplace/redhat-operators-mcgzz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-mcgzz\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:11 crc kubenswrapper[4838]: I1128 10:02:11.395275 4838 status_manager.go:851] "Failed to get status for pod" podUID="37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:11 crc kubenswrapper[4838]: I1128 10:02:11.395575 4838 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:11 crc kubenswrapper[4838]: I1128 10:02:11.395981 4838 status_manager.go:851] "Failed to get status for pod" podUID="d14771c4-48e3-4efe-a5f2-31331a30979f" pod="openshift-marketplace/certified-operators-zlxs4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zlxs4\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:11 crc kubenswrapper[4838]: I1128 10:02:11.396271 4838 status_manager.go:851] "Failed to get status for pod" podUID="e35b5813-d7b9-4cbc-b002-44d465476046" pod="openshift-marketplace/community-operators-bcnzk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-bcnzk\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:11 crc kubenswrapper[4838]: I1128 10:02:11.396559 4838 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:11 crc kubenswrapper[4838]: I1128 10:02:11.484874 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 10:02:11 crc kubenswrapper[4838]: I1128 10:02:11.484940 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 10:02:11 crc kubenswrapper[4838]: I1128 
Nov 28 10:02:11 crc kubenswrapper[4838]: I1128 10:02:11.484992 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") "
Nov 28 10:02:11 crc kubenswrapper[4838]: I1128 10:02:11.485816 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 10:02:11 crc kubenswrapper[4838]: I1128 10:02:11.485883 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 10:02:11 crc kubenswrapper[4838]: I1128 10:02:11.485915 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 10:02:11 crc kubenswrapper[4838]: I1128 10:02:11.586405 4838 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\""
Nov 28 10:02:11 crc kubenswrapper[4838]: I1128 10:02:11.586445 4838 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\""
Nov 28 10:02:11 crc kubenswrapper[4838]: I1128 10:02:11.586457 4838 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\""
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.172955 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.173709 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver/0.log"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.174325 4838 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b" exitCode=0
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.174385 4838 scope.go:117] "RemoveContainer" containerID="3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.174385 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.202255 4838 status_manager.go:851] "Failed to get status for pod" podUID="d8b6f770-e994-4bf8-92de-7e359cbe75a8" pod="openshift-marketplace/redhat-operators-n5fj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n5fj2\": dial tcp 38.102.83.65:6443: connect: connection refused"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.202936 4838 status_manager.go:851] "Failed to get status for pod" podUID="ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" pod="openshift-marketplace/redhat-operators-mcgzz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-mcgzz\": dial tcp 38.102.83.65:6443: connect: connection refused"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.203559 4838 status_manager.go:851] "Failed to get status for pod" podUID="37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.65:6443: connect: connection refused"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.204135 4838 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.65:6443: connect: connection refused"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.204671 4838 status_manager.go:851] "Failed to get status for pod" podUID="d14771c4-48e3-4efe-a5f2-31331a30979f" pod="openshift-marketplace/certified-operators-zlxs4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zlxs4\": dial tcp 38.102.83.65:6443: connect: connection refused"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.205315 4838 status_manager.go:851] "Failed to get status for pod" podUID="e35b5813-d7b9-4cbc-b002-44d465476046" pod="openshift-marketplace/community-operators-bcnzk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-bcnzk\": dial tcp 38.102.83.65:6443: connect: connection refused"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.205804 4838 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.65:6443: connect: connection refused"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.209697 4838 scope.go:117] "RemoveContainer" containerID="3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.234776 4838 scope.go:117] "RemoveContainer" containerID="1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.259871 4838 scope.go:117] "RemoveContainer" containerID="25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.275387 4838 scope.go:117] "RemoveContainer" containerID="837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.290862 4838 scope.go:117] "RemoveContainer" containerID="065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.325502 4838 scope.go:117] "RemoveContainer" containerID="b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.349718 4838 scope.go:117] "RemoveContainer" containerID="3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9"
Nov 28 10:02:12 crc kubenswrapper[4838]: E1128 10:02:12.355141 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\": container with ID starting with 3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9 not found: ID does not exist" containerID="3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.355198 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9"} err="failed to get container status \"3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\": rpc error: code = NotFound desc = could not find container \"3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9\": container with ID starting with 3a821e5b105f62cf7f3b12714bfb0f58867a808d1c777f2fb711895c345d8ee9 not found: ID does not exist"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.355237 4838 scope.go:117] "RemoveContainer" containerID="3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b"
Nov 28 10:02:12 crc kubenswrapper[4838]: E1128 10:02:12.355704 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\": container with ID starting with 3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b not found: ID does not exist" containerID="3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.355753 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b"} err="failed to get container status \"3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\": rpc error: code = NotFound desc = could not find container \"3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b\": container with ID starting with 3a04db28528da269759635186b06952f9a3dc4c2b130458354a5bf9ef994db8b not found: ID does not exist"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.355770 4838 scope.go:117] "RemoveContainer" containerID="1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b"
Nov 28 10:02:12 crc kubenswrapper[4838]: E1128 10:02:12.356112 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\": container with ID starting with 1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b not found: ID does not exist" containerID="1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.356136 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b"} err="failed to get container status \"1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\": rpc error: code = NotFound desc = could not find container \"1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b\": container with ID starting with 1c690f38f717fbfbd681f21f5dc845b94601530b4fc0860fdbeb1317042c793b not found: ID does not exist"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.356158 4838 scope.go:117] "RemoveContainer" containerID="25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b"
Nov 28 10:02:12 crc kubenswrapper[4838]: E1128 10:02:12.356526 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\": container with ID starting with 25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b not found: ID does not exist" containerID="25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.356567 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b"} err="failed to get container status \"25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\": rpc error: code = NotFound desc = could not find container \"25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b\": container with ID starting with 25d620ea6d7c38547e89d61e7a60f227d28b21f18d563055db47256b266d5b6b not found: ID does not exist"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.356593 4838 scope.go:117] "RemoveContainer" containerID="837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555"
Nov 28 10:02:12 crc kubenswrapper[4838]: E1128 10:02:12.357168 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\": container with ID starting with 837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555 not found: ID does not exist" containerID="837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.357210 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555"} err="failed to get container status \"837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\": rpc error: code = NotFound desc = could not find container \"837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555\": container with ID starting with 837cc7d33d601516b4ea56a283f71167b41da7c769070c97ea77f29e97cf1555 not found: ID does not exist"
Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.357235 4838 scope.go:117] "RemoveContainer" containerID="065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89"
Nov 28 10:02:12 crc kubenswrapper[4838]: E1128 10:02:12.357586 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\": container with ID starting with 065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89 not found: ID does not exist" containerID="065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89"
not exist" containerID="065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89" Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.357619 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89"} err="failed to get container status \"065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\": rpc error: code = NotFound desc = could not find container \"065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89\": container with ID starting with 065f0c39a0f1fbdb83a6a758ddd20a4af3ddf96297ce35551b570f5e6c9deb89 not found: ID does not exist" Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.357636 4838 scope.go:117] "RemoveContainer" containerID="b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5" Nov 28 10:02:12 crc kubenswrapper[4838]: E1128 10:02:12.358385 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\": container with ID starting with b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5 not found: ID does not exist" containerID="b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5" Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.358423 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5"} err="failed to get container status \"b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\": rpc error: code = NotFound desc = could not find container \"b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5\": container with ID starting with b4d4af6fc5dd97d5b6104ef79b62fd241db5659dfdfd496a38536453b207a4e5 not found: ID does not exist" Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.569168 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 28 10:02:12 crc kubenswrapper[4838]: E1128 10:02:12.722570 4838 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:12 crc kubenswrapper[4838]: E1128 10:02:12.722975 4838 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:12 crc kubenswrapper[4838]: E1128 10:02:12.723388 4838 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:12 crc kubenswrapper[4838]: E1128 10:02:12.724516 4838 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:12 crc kubenswrapper[4838]: E1128 10:02:12.725006 4838 controller.go:195] "Failed to update lease" err="Put 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:12 crc kubenswrapper[4838]: I1128 10:02:12.725058 4838 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 28 10:02:12 crc kubenswrapper[4838]: E1128 10:02:12.725770 4838 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" interval="200ms" Nov 28 10:02:12 crc kubenswrapper[4838]: E1128 10:02:12.926701 4838 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" interval="400ms" Nov 28 10:02:13 crc kubenswrapper[4838]: E1128 10:02:13.329413 4838 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" interval="800ms" Nov 28 10:02:13 crc kubenswrapper[4838]: I1128 10:02:13.561336 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 10:02:13 crc kubenswrapper[4838]: I1128 10:02:13.562383 4838 status_manager.go:851] "Failed to get status for pod" podUID="e35b5813-d7b9-4cbc-b002-44d465476046" pod="openshift-marketplace/community-operators-bcnzk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-bcnzk\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:13 crc kubenswrapper[4838]: I1128 10:02:13.563086 4838 status_manager.go:851] "Failed to get status for pod" podUID="d8b6f770-e994-4bf8-92de-7e359cbe75a8" pod="openshift-marketplace/redhat-operators-n5fj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n5fj2\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:13 crc kubenswrapper[4838]: I1128 10:02:13.563860 4838 status_manager.go:851] "Failed to get status for pod" podUID="ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" pod="openshift-marketplace/redhat-operators-mcgzz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-mcgzz\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:13 crc kubenswrapper[4838]: I1128 10:02:13.564376 4838 status_manager.go:851] "Failed to get status for pod" podUID="37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:13 crc kubenswrapper[4838]: I1128 10:02:13.564865 4838 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:13 crc 
kubenswrapper[4838]: I1128 10:02:13.565573 4838 status_manager.go:851] "Failed to get status for pod" podUID="d14771c4-48e3-4efe-a5f2-31331a30979f" pod="openshift-marketplace/certified-operators-zlxs4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zlxs4\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:13 crc kubenswrapper[4838]: I1128 10:02:13.585626 4838 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1630b1c6-63b5-4481-a711-0485765d37e3" Nov 28 10:02:13 crc kubenswrapper[4838]: I1128 10:02:13.585668 4838 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1630b1c6-63b5-4481-a711-0485765d37e3" Nov 28 10:02:13 crc kubenswrapper[4838]: E1128 10:02:13.586179 4838 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.65:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 10:02:13 crc kubenswrapper[4838]: I1128 10:02:13.587011 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 10:02:13 crc kubenswrapper[4838]: W1128 10:02:13.624568 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-e7df7dc2c8b4d25886e595ebb983eda86931767de8435821ec4f367b294b2915 WatchSource:0}: Error finding container e7df7dc2c8b4d25886e595ebb983eda86931767de8435821ec4f367b294b2915: Status 404 returned error can't find the container with id e7df7dc2c8b4d25886e595ebb983eda86931767de8435821ec4f367b294b2915 Nov 28 10:02:14 crc kubenswrapper[4838]: E1128 10:02:14.130398 4838 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.65:6443: connect: connection refused" interval="1.6s" Nov 28 10:02:14 crc kubenswrapper[4838]: I1128 10:02:14.190064 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"e7df7dc2c8b4d25886e595ebb983eda86931767de8435821ec4f367b294b2915"} Nov 28 10:02:15 crc kubenswrapper[4838]: I1128 10:02:15.199967 4838 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="f6c0d7f5209412636d3dd822c19b697b403435c2f1d4dc51e03e80e4fa8351a9" exitCode=0 Nov 28 10:02:15 crc kubenswrapper[4838]: I1128 10:02:15.200056 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"f6c0d7f5209412636d3dd822c19b697b403435c2f1d4dc51e03e80e4fa8351a9"} Nov 28 10:02:15 crc kubenswrapper[4838]: I1128 10:02:15.200493 4838 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1630b1c6-63b5-4481-a711-0485765d37e3" Nov 28 10:02:15 crc kubenswrapper[4838]: I1128 10:02:15.200535 4838 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1630b1c6-63b5-4481-a711-0485765d37e3" Nov 28 10:02:15 crc kubenswrapper[4838]: I1128 10:02:15.201219 4838 
status_manager.go:851] "Failed to get status for pod" podUID="d8b6f770-e994-4bf8-92de-7e359cbe75a8" pod="openshift-marketplace/redhat-operators-n5fj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-n5fj2\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:15 crc kubenswrapper[4838]: E1128 10:02:15.201230 4838 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.65:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 10:02:15 crc kubenswrapper[4838]: I1128 10:02:15.201703 4838 status_manager.go:851] "Failed to get status for pod" podUID="ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" pod="openshift-marketplace/redhat-operators-mcgzz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-mcgzz\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:15 crc kubenswrapper[4838]: I1128 10:02:15.202501 4838 status_manager.go:851] "Failed to get status for pod" podUID="37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:15 crc kubenswrapper[4838]: I1128 10:02:15.203644 4838 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:15 crc kubenswrapper[4838]: I1128 10:02:15.204167 4838 status_manager.go:851] "Failed to get status for pod" podUID="d14771c4-48e3-4efe-a5f2-31331a30979f" pod="openshift-marketplace/certified-operators-zlxs4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-zlxs4\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:15 crc kubenswrapper[4838]: I1128 10:02:15.204676 4838 status_manager.go:851] "Failed to get status for pod" podUID="e35b5813-d7b9-4cbc-b002-44d465476046" pod="openshift-marketplace/community-operators-bcnzk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-bcnzk\": dial tcp 38.102.83.65:6443: connect: connection refused" Nov 28 10:02:16 crc kubenswrapper[4838]: I1128 10:02:16.216921 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"2d6f5bcc6eedbaf4f53de5ae252452959e47963e4dcf29f907b04559d1dd5485"} Nov 28 10:02:16 crc kubenswrapper[4838]: I1128 10:02:16.217329 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"53a956a38035a5b3b7cdad6a5803375d99f3e5922db188bf4a7ddbb7ab071a3d"} Nov 28 10:02:16 crc kubenswrapper[4838]: I1128 10:02:16.217349 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"55e75cf743f5c8091eea4708f6bc760b4ff70c33628201e4e4797f49694363a6"} Nov 28 10:02:16 crc kubenswrapper[4838]: I1128 10:02:16.221188 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/cluster-policy-controller/0.log" Nov 28 10:02:16 crc kubenswrapper[4838]: I1128 10:02:16.221881 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 28 10:02:16 crc kubenswrapper[4838]: I1128 10:02:16.221927 4838 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b" exitCode=1 Nov 28 10:02:16 crc kubenswrapper[4838]: I1128 10:02:16.221952 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b"} Nov 28 10:02:16 crc kubenswrapper[4838]: I1128 10:02:16.222464 4838 scope.go:117] "RemoveContainer" containerID="ef54c8995d6bb8e077c6d1c3d796b6f2ce88370b6cadc4f040f590760103320b" Nov 28 10:02:16 crc kubenswrapper[4838]: I1128 10:02:16.453277 4838 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 10:02:17 crc kubenswrapper[4838]: I1128 10:02:17.230015 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/cluster-policy-controller/0.log" Nov 28 10:02:17 crc kubenswrapper[4838]: I1128 10:02:17.230526 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 28 10:02:17 crc kubenswrapper[4838]: I1128 10:02:17.230591 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"7e073cf7f9104d1a2b3e04ff0968b88ef212c1666ff77a0612425bce49bf22ea"} Nov 28 10:02:17 crc kubenswrapper[4838]: I1128 10:02:17.234240 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"47b74f6b14132676678fcc39de8ba5bb4de71baec0ec20808080289d5effd2a7"} Nov 28 10:02:17 crc kubenswrapper[4838]: I1128 10:02:17.234275 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"478b8e0d6009682f93824d38acdb69ee611d0b8cba8bdc1b88c1d6e699782584"} Nov 28 10:02:17 crc kubenswrapper[4838]: I1128 10:02:17.234462 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 10:02:17 crc kubenswrapper[4838]: I1128 10:02:17.234577 4838 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1630b1c6-63b5-4481-a711-0485765d37e3" Nov 28 10:02:17 crc kubenswrapper[4838]: I1128 10:02:17.234604 4838 
mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1630b1c6-63b5-4481-a711-0485765d37e3" Nov 28 10:02:18 crc kubenswrapper[4838]: I1128 10:02:18.587666 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 10:02:18 crc kubenswrapper[4838]: I1128 10:02:18.588023 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 10:02:18 crc kubenswrapper[4838]: I1128 10:02:18.594423 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 10:02:20 crc kubenswrapper[4838]: I1128 10:02:20.503625 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 10:02:22 crc kubenswrapper[4838]: I1128 10:02:22.014151 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 10:02:22 crc kubenswrapper[4838]: I1128 10:02:22.021560 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 10:02:22 crc kubenswrapper[4838]: I1128 10:02:22.242784 4838 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 10:02:22 crc kubenswrapper[4838]: I1128 10:02:22.259103 4838 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1630b1c6-63b5-4481-a711-0485765d37e3" Nov 28 10:02:22 crc kubenswrapper[4838]: I1128 10:02:22.259132 4838 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1630b1c6-63b5-4481-a711-0485765d37e3" Nov 28 10:02:22 crc kubenswrapper[4838]: I1128 10:02:22.263073 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 10:02:22 crc kubenswrapper[4838]: I1128 10:02:22.357344 4838 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="bf6056c8-9937-4d3a-91b6-4d46cf34dbaf" Nov 28 10:02:23 crc kubenswrapper[4838]: I1128 10:02:23.263159 4838 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1630b1c6-63b5-4481-a711-0485765d37e3" Nov 28 10:02:23 crc kubenswrapper[4838]: I1128 10:02:23.263501 4838 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1630b1c6-63b5-4481-a711-0485765d37e3" Nov 28 10:02:23 crc kubenswrapper[4838]: I1128 10:02:23.266642 4838 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="bf6056c8-9937-4d3a-91b6-4d46cf34dbaf" Nov 28 10:02:25 crc kubenswrapper[4838]: I1128 10:02:25.373374 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 28 10:02:25 crc kubenswrapper[4838]: I1128 10:02:25.411218 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 28 10:02:25 crc kubenswrapper[4838]: I1128 10:02:25.445855 4838 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 10:02:25 crc kubenswrapper[4838]: I1128 10:02:25.642734 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 28 10:02:25 crc kubenswrapper[4838]: I1128 10:02:25.911835 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 28 10:02:25 crc kubenswrapper[4838]: I1128 10:02:25.954490 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 28 10:02:26 crc kubenswrapper[4838]: I1128 10:02:26.063174 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 28 10:02:26 crc kubenswrapper[4838]: I1128 10:02:26.200685 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 28 10:02:26 crc kubenswrapper[4838]: I1128 10:02:26.269546 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 28 10:02:26 crc kubenswrapper[4838]: I1128 10:02:26.487667 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 10:02:26 crc kubenswrapper[4838]: I1128 10:02:26.496265 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 28 10:02:26 crc kubenswrapper[4838]: I1128 10:02:26.641894 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 28 10:02:26 crc kubenswrapper[4838]: I1128 10:02:26.761378 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 28 10:02:26 crc kubenswrapper[4838]: I1128 10:02:26.773304 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 28 10:02:26 crc kubenswrapper[4838]: I1128 10:02:26.799904 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 10:02:26 crc kubenswrapper[4838]: I1128 10:02:26.864859 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 28 10:02:26 crc kubenswrapper[4838]: I1128 10:02:26.920740 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 28 10:02:26 crc kubenswrapper[4838]: I1128 10:02:26.928271 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 28 10:02:26 crc kubenswrapper[4838]: I1128 10:02:26.932860 4838 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 28 10:02:26 crc kubenswrapper[4838]: I1128 10:02:26.933946 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 28 10:02:26 crc kubenswrapper[4838]: I1128 10:02:26.981331 4838 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.009410 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.054222 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.055170 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.220001 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.267992 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.268766 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.282053 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.306666 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.404327 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.423157 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.474327 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.514985 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.519851 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.628929 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.638090 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.649052 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.650157 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.723318 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.725558 4838 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-apiserver"/"image-import-ca" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.750191 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.751198 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.811151 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.858886 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 28 10:02:27 crc kubenswrapper[4838]: I1128 10:02:27.971154 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.014410 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.044688 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.076970 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.136085 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.165836 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.183755 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.212876 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.217954 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.301288 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.307546 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.413161 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.432556 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.446564 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.451857 4838 reflector.go:368] Caches populated 
for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.486252 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.599239 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.617290 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.634412 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.670559 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.694630 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.742323 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.789180 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.800595 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.846761 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.935358 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.938613 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 28 10:02:28 crc kubenswrapper[4838]: I1128 10:02:28.966025 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.001014 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.046061 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.048460 4838 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.129074 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.132115 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.143996 4838 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.198438 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.213107 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.222828 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.273191 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.280476 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.282445 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.295677 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.322635 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.334592 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.397259 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.426586 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.435549 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.440665 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.451962 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.455367 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.472505 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.493600 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.550480 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.574250 4838 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.585321 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.651247 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.712255 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.738943 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.752410 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.764393 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.844245 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.864310 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.896043 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.949520 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.960277 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 28 10:02:29 crc kubenswrapper[4838]: I1128 10:02:29.976024 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.018576 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.037359 4838 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.102067 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.107191 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.145328 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.147575 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.151618 4838 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.164831 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.310352 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.320007 4838 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.344709 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.366850 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.377740 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.388255 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.413855 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.426895 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.439666 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.446444 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.476536 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.511002 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.609227 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.623293 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.636162 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.641369 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.694593 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.716990 4838 reflector.go:368] Caches populated for *v1.ConfigMap 
from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.720405 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.721536 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.792558 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.815246 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.832197 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.886618 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.945141 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 28 10:02:30 crc kubenswrapper[4838]: I1128 10:02:30.978805 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.002734 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.047238 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.050357 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.052372 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.080929 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.117380 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.147050 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.168705 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.279820 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.293551 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.296574 4838 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.323624 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.400304 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.406159 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.451915 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.459631 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.476738 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.512444 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.519897 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.526490 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.541153 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.550166 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.569063 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.583051 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.605492 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.643850 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.659989 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.662551 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.674340 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.691918 4838 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.716934 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.721377 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.783618 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.830163 4838 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.831284 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=29.831266573 podStartE2EDuration="29.831266573s" podCreationTimestamp="2025-11-28 10:02:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:02:22.250362913 +0000 UTC m=+313.949337093" watchObservedRunningTime="2025-11-28 10:02:31.831266573 +0000 UTC m=+323.530240763" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.835411 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bcnzk","openshift-kube-apiserver/kube-apiserver-crc","openshift-marketplace/certified-operators-zlxs4"] Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.835472 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.847052 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.849783 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.854737 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.864834 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=9.864810623 podStartE2EDuration="9.864810623s" podCreationTimestamp="2025-11-28 10:02:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:02:31.8566138 +0000 UTC m=+323.555588000" watchObservedRunningTime="2025-11-28 10:02:31.864810623 +0000 UTC m=+323.563784793" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.895681 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.946958 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.947865 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.965355 4838 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.977537 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 28 10:02:31 crc kubenswrapper[4838]: I1128 10:02:31.983096 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.008754 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.043609 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.100429 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.102328 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.168794 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.179413 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.223353 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.238162 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.244399 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.252686 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.282165 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.282351 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.287410 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.294008 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.334810 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.343465 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.343954 4838 reflector.go:368] 
Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.362734 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.451329 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.457688 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.568653 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d14771c4-48e3-4efe-a5f2-31331a30979f" path="/var/lib/kubelet/pods/d14771c4-48e3-4efe-a5f2-31331a30979f/volumes" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.569503 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e35b5813-d7b9-4cbc-b002-44d465476046" path="/var/lib/kubelet/pods/e35b5813-d7b9-4cbc-b002-44d465476046/volumes" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.599301 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.755844 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.798375 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.808822 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.821336 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.872545 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.903137 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.905466 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 28 10:02:32 crc kubenswrapper[4838]: I1128 10:02:32.975267 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 28 10:02:33 crc kubenswrapper[4838]: I1128 10:02:33.036890 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 28 10:02:33 crc kubenswrapper[4838]: I1128 10:02:33.161742 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 28 10:02:33 crc kubenswrapper[4838]: I1128 10:02:33.219167 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 28 10:02:33 crc kubenswrapper[4838]: I1128 10:02:33.238985 4838 reflector.go:368] 
Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 28 10:02:33 crc kubenswrapper[4838]: I1128 10:02:33.374207 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 28 10:02:33 crc kubenswrapper[4838]: I1128 10:02:33.484171 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 28 10:02:33 crc kubenswrapper[4838]: I1128 10:02:33.556882 4838 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 10:02:33 crc kubenswrapper[4838]: I1128 10:02:33.557270 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://63d56b65686e3856fc7c923cd1def7e61ab1cac00a4f33f0bfbbeddcedfa5997" gracePeriod=5 Nov 28 10:02:33 crc kubenswrapper[4838]: I1128 10:02:33.562878 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 28 10:02:33 crc kubenswrapper[4838]: I1128 10:02:33.582672 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 28 10:02:33 crc kubenswrapper[4838]: I1128 10:02:33.662292 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 28 10:02:33 crc kubenswrapper[4838]: I1128 10:02:33.785328 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 28 10:02:33 crc kubenswrapper[4838]: I1128 10:02:33.819992 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 28 10:02:33 crc kubenswrapper[4838]: I1128 10:02:33.835395 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 28 10:02:33 crc kubenswrapper[4838]: I1128 10:02:33.863048 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 28 10:02:33 crc kubenswrapper[4838]: I1128 10:02:33.899094 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 28 10:02:33 crc kubenswrapper[4838]: I1128 10:02:33.899896 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 28 10:02:33 crc kubenswrapper[4838]: I1128 10:02:33.913306 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 28 10:02:33 crc kubenswrapper[4838]: I1128 10:02:33.929525 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 28 10:02:33 crc kubenswrapper[4838]: I1128 10:02:33.932678 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 28 10:02:34 crc kubenswrapper[4838]: I1128 10:02:34.047376 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 28 10:02:34 crc kubenswrapper[4838]: I1128 10:02:34.157529 4838 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 28 10:02:34 crc kubenswrapper[4838]: I1128 10:02:34.231253 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 28 10:02:34 crc kubenswrapper[4838]: I1128 10:02:34.239656 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 28 10:02:34 crc kubenswrapper[4838]: I1128 10:02:34.241427 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 28 10:02:34 crc kubenswrapper[4838]: I1128 10:02:34.247866 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 28 10:02:34 crc kubenswrapper[4838]: I1128 10:02:34.331651 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 28 10:02:34 crc kubenswrapper[4838]: I1128 10:02:34.352482 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 28 10:02:34 crc kubenswrapper[4838]: I1128 10:02:34.491556 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 10:02:34 crc kubenswrapper[4838]: I1128 10:02:34.547420 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 28 10:02:34 crc kubenswrapper[4838]: I1128 10:02:34.584233 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 28 10:02:34 crc kubenswrapper[4838]: I1128 10:02:34.851849 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 28 10:02:35 crc kubenswrapper[4838]: I1128 10:02:35.348275 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 28 10:02:35 crc kubenswrapper[4838]: I1128 10:02:35.495392 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 28 10:02:35 crc kubenswrapper[4838]: I1128 10:02:35.537025 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 28 10:02:35 crc kubenswrapper[4838]: I1128 10:02:35.539579 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 28 10:02:35 crc kubenswrapper[4838]: I1128 10:02:35.769576 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 28 10:02:35 crc kubenswrapper[4838]: I1128 10:02:35.827042 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 28 10:02:35 crc kubenswrapper[4838]: I1128 10:02:35.839077 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 28 10:02:35 crc kubenswrapper[4838]: I1128 10:02:35.840148 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 28 10:02:35 crc kubenswrapper[4838]: I1128 10:02:35.973999 4838 reflector.go:368] 
Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.152258 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.152328 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.301140 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.301212 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.301279 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.301321 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.301299 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.301441 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.301486 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.301567 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.301610 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.302034 4838 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.302062 4838 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.302073 4838 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.302085 4838 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.308377 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.399273 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.399359 4838 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="63d56b65686e3856fc7c923cd1def7e61ab1cac00a4f33f0bfbbeddcedfa5997" exitCode=137 Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.399420 4838 scope.go:117] "RemoveContainer" containerID="63d56b65686e3856fc7c923cd1def7e61ab1cac00a4f33f0bfbbeddcedfa5997" Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.399432 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.402859 4838 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.427189 4838 scope.go:117] "RemoveContainer" containerID="63d56b65686e3856fc7c923cd1def7e61ab1cac00a4f33f0bfbbeddcedfa5997" Nov 28 10:02:39 crc kubenswrapper[4838]: E1128 10:02:39.427788 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63d56b65686e3856fc7c923cd1def7e61ab1cac00a4f33f0bfbbeddcedfa5997\": container with ID starting with 63d56b65686e3856fc7c923cd1def7e61ab1cac00a4f33f0bfbbeddcedfa5997 not found: ID does not exist" containerID="63d56b65686e3856fc7c923cd1def7e61ab1cac00a4f33f0bfbbeddcedfa5997" Nov 28 10:02:39 crc kubenswrapper[4838]: I1128 10:02:39.427828 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63d56b65686e3856fc7c923cd1def7e61ab1cac00a4f33f0bfbbeddcedfa5997"} err="failed to get container status \"63d56b65686e3856fc7c923cd1def7e61ab1cac00a4f33f0bfbbeddcedfa5997\": rpc error: code = NotFound desc = could not find container \"63d56b65686e3856fc7c923cd1def7e61ab1cac00a4f33f0bfbbeddcedfa5997\": container with ID starting with 63d56b65686e3856fc7c923cd1def7e61ab1cac00a4f33f0bfbbeddcedfa5997 not found: ID does not exist" Nov 28 10:02:40 crc kubenswrapper[4838]: I1128 10:02:40.574682 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 28 10:02:40 crc kubenswrapper[4838]: I1128 10:02:40.575354 4838 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Nov 28 10:02:40 crc kubenswrapper[4838]: I1128 10:02:40.601277 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 10:02:40 crc kubenswrapper[4838]: I1128 10:02:40.601335 4838 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="ad180624-f224-49bb-a1a1-b890666c2117" Nov 28 10:02:40 crc kubenswrapper[4838]: I1128 10:02:40.613347 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 10:02:40 crc kubenswrapper[4838]: I1128 10:02:40.613421 4838 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="ad180624-f224-49bb-a1a1-b890666c2117" Nov 28 10:03:09 crc kubenswrapper[4838]: I1128 10:03:09.792948 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-75j6q"] Nov 28 10:03:09 crc kubenswrapper[4838]: I1128 10:03:09.793656 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" podUID="cc483880-bf40-4f4c-bf77-52eb4896bd5b" containerName="controller-manager" containerID="cri-o://68fad22b01142c00a010c036fae40d56e97966c40e06f59ff5f3ac350944c7a6" gracePeriod=30 Nov 28 10:03:09 crc kubenswrapper[4838]: I1128 10:03:09.799702 
4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq"] Nov 28 10:03:09 crc kubenswrapper[4838]: I1128 10:03:09.799929 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq" podUID="74b81264-e855-4198-a063-9ef62eb9ad30" containerName="route-controller-manager" containerID="cri-o://8874d4cd19f3eb967d9b738b3b8b25d11d2b7b8f46e2d9584a33d9dc929bbb21" gracePeriod=30 Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.178961 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.184314 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.239197 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fzrf6\" (UniqueName: \"kubernetes.io/projected/cc483880-bf40-4f4c-bf77-52eb4896bd5b-kube-api-access-fzrf6\") pod \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\" (UID: \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\") " Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.239260 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cc483880-bf40-4f4c-bf77-52eb4896bd5b-proxy-ca-bundles\") pod \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\" (UID: \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\") " Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.239294 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cc483880-bf40-4f4c-bf77-52eb4896bd5b-client-ca\") pod \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\" (UID: \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\") " Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.239341 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc483880-bf40-4f4c-bf77-52eb4896bd5b-config\") pod \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\" (UID: \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\") " Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.239367 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cc483880-bf40-4f4c-bf77-52eb4896bd5b-serving-cert\") pod \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\" (UID: \"cc483880-bf40-4f4c-bf77-52eb4896bd5b\") " Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.240586 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc483880-bf40-4f4c-bf77-52eb4896bd5b-client-ca" (OuterVolumeSpecName: "client-ca") pod "cc483880-bf40-4f4c-bf77-52eb4896bd5b" (UID: "cc483880-bf40-4f4c-bf77-52eb4896bd5b"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.240612 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc483880-bf40-4f4c-bf77-52eb4896bd5b-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "cc483880-bf40-4f4c-bf77-52eb4896bd5b" (UID: "cc483880-bf40-4f4c-bf77-52eb4896bd5b"). 
InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.240671 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc483880-bf40-4f4c-bf77-52eb4896bd5b-config" (OuterVolumeSpecName: "config") pod "cc483880-bf40-4f4c-bf77-52eb4896bd5b" (UID: "cc483880-bf40-4f4c-bf77-52eb4896bd5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.240807 4838 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cc483880-bf40-4f4c-bf77-52eb4896bd5b-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.240823 4838 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cc483880-bf40-4f4c-bf77-52eb4896bd5b-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.240835 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc483880-bf40-4f4c-bf77-52eb4896bd5b-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.247669 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc483880-bf40-4f4c-bf77-52eb4896bd5b-kube-api-access-fzrf6" (OuterVolumeSpecName: "kube-api-access-fzrf6") pod "cc483880-bf40-4f4c-bf77-52eb4896bd5b" (UID: "cc483880-bf40-4f4c-bf77-52eb4896bd5b"). InnerVolumeSpecName "kube-api-access-fzrf6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.247683 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc483880-bf40-4f4c-bf77-52eb4896bd5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "cc483880-bf40-4f4c-bf77-52eb4896bd5b" (UID: "cc483880-bf40-4f4c-bf77-52eb4896bd5b"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.341220 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/74b81264-e855-4198-a063-9ef62eb9ad30-serving-cert\") pod \"74b81264-e855-4198-a063-9ef62eb9ad30\" (UID: \"74b81264-e855-4198-a063-9ef62eb9ad30\") " Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.341272 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74b81264-e855-4198-a063-9ef62eb9ad30-config\") pod \"74b81264-e855-4198-a063-9ef62eb9ad30\" (UID: \"74b81264-e855-4198-a063-9ef62eb9ad30\") " Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.341329 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/74b81264-e855-4198-a063-9ef62eb9ad30-client-ca\") pod \"74b81264-e855-4198-a063-9ef62eb9ad30\" (UID: \"74b81264-e855-4198-a063-9ef62eb9ad30\") " Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.341476 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pfz6q\" (UniqueName: \"kubernetes.io/projected/74b81264-e855-4198-a063-9ef62eb9ad30-kube-api-access-pfz6q\") pod \"74b81264-e855-4198-a063-9ef62eb9ad30\" (UID: \"74b81264-e855-4198-a063-9ef62eb9ad30\") " Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.341794 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fzrf6\" (UniqueName: \"kubernetes.io/projected/cc483880-bf40-4f4c-bf77-52eb4896bd5b-kube-api-access-fzrf6\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.341818 4838 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cc483880-bf40-4f4c-bf77-52eb4896bd5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.342248 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74b81264-e855-4198-a063-9ef62eb9ad30-config" (OuterVolumeSpecName: "config") pod "74b81264-e855-4198-a063-9ef62eb9ad30" (UID: "74b81264-e855-4198-a063-9ef62eb9ad30"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.342581 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74b81264-e855-4198-a063-9ef62eb9ad30-client-ca" (OuterVolumeSpecName: "client-ca") pod "74b81264-e855-4198-a063-9ef62eb9ad30" (UID: "74b81264-e855-4198-a063-9ef62eb9ad30"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.344441 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74b81264-e855-4198-a063-9ef62eb9ad30-kube-api-access-pfz6q" (OuterVolumeSpecName: "kube-api-access-pfz6q") pod "74b81264-e855-4198-a063-9ef62eb9ad30" (UID: "74b81264-e855-4198-a063-9ef62eb9ad30"). InnerVolumeSpecName "kube-api-access-pfz6q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.344902 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74b81264-e855-4198-a063-9ef62eb9ad30-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "74b81264-e855-4198-a063-9ef62eb9ad30" (UID: "74b81264-e855-4198-a063-9ef62eb9ad30"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.442919 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pfz6q\" (UniqueName: \"kubernetes.io/projected/74b81264-e855-4198-a063-9ef62eb9ad30-kube-api-access-pfz6q\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.442984 4838 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/74b81264-e855-4198-a063-9ef62eb9ad30-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.443012 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74b81264-e855-4198-a063-9ef62eb9ad30-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.443035 4838 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/74b81264-e855-4198-a063-9ef62eb9ad30-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.638447 4838 generic.go:334] "Generic (PLEG): container finished" podID="cc483880-bf40-4f4c-bf77-52eb4896bd5b" containerID="68fad22b01142c00a010c036fae40d56e97966c40e06f59ff5f3ac350944c7a6" exitCode=0 Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.638564 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.638598 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" event={"ID":"cc483880-bf40-4f4c-bf77-52eb4896bd5b","Type":"ContainerDied","Data":"68fad22b01142c00a010c036fae40d56e97966c40e06f59ff5f3ac350944c7a6"} Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.638759 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-75j6q" event={"ID":"cc483880-bf40-4f4c-bf77-52eb4896bd5b","Type":"ContainerDied","Data":"91baa3b5b59a6db9b0bc9d99bcd3896eef74bdd31fb32b9d35840a3de0a3973f"} Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.638888 4838 scope.go:117] "RemoveContainer" containerID="68fad22b01142c00a010c036fae40d56e97966c40e06f59ff5f3ac350944c7a6" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.642949 4838 generic.go:334] "Generic (PLEG): container finished" podID="74b81264-e855-4198-a063-9ef62eb9ad30" containerID="8874d4cd19f3eb967d9b738b3b8b25d11d2b7b8f46e2d9584a33d9dc929bbb21" exitCode=0 Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.643007 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq" event={"ID":"74b81264-e855-4198-a063-9ef62eb9ad30","Type":"ContainerDied","Data":"8874d4cd19f3eb967d9b738b3b8b25d11d2b7b8f46e2d9584a33d9dc929bbb21"} Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.643048 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq" event={"ID":"74b81264-e855-4198-a063-9ef62eb9ad30","Type":"ContainerDied","Data":"38a0d2caffe23df330b235ca815748b61341db940d0bc79b5880a5f4eca3d031"} Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.643079 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.673533 4838 scope.go:117] "RemoveContainer" containerID="68fad22b01142c00a010c036fae40d56e97966c40e06f59ff5f3ac350944c7a6" Nov 28 10:03:10 crc kubenswrapper[4838]: E1128 10:03:10.676268 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"68fad22b01142c00a010c036fae40d56e97966c40e06f59ff5f3ac350944c7a6\": container with ID starting with 68fad22b01142c00a010c036fae40d56e97966c40e06f59ff5f3ac350944c7a6 not found: ID does not exist" containerID="68fad22b01142c00a010c036fae40d56e97966c40e06f59ff5f3ac350944c7a6" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.676939 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68fad22b01142c00a010c036fae40d56e97966c40e06f59ff5f3ac350944c7a6"} err="failed to get container status \"68fad22b01142c00a010c036fae40d56e97966c40e06f59ff5f3ac350944c7a6\": rpc error: code = NotFound desc = could not find container \"68fad22b01142c00a010c036fae40d56e97966c40e06f59ff5f3ac350944c7a6\": container with ID starting with 68fad22b01142c00a010c036fae40d56e97966c40e06f59ff5f3ac350944c7a6 not found: ID does not exist" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.677216 4838 scope.go:117] "RemoveContainer" containerID="8874d4cd19f3eb967d9b738b3b8b25d11d2b7b8f46e2d9584a33d9dc929bbb21" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.679009 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq"] Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.688311 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xrgtq"] Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.695306 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-75j6q"] Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.701458 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-75j6q"] Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.704106 4838 scope.go:117] "RemoveContainer" containerID="8874d4cd19f3eb967d9b738b3b8b25d11d2b7b8f46e2d9584a33d9dc929bbb21" Nov 28 10:03:10 crc kubenswrapper[4838]: E1128 10:03:10.704657 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8874d4cd19f3eb967d9b738b3b8b25d11d2b7b8f46e2d9584a33d9dc929bbb21\": container with ID starting with 8874d4cd19f3eb967d9b738b3b8b25d11d2b7b8f46e2d9584a33d9dc929bbb21 not found: ID does not exist" containerID="8874d4cd19f3eb967d9b738b3b8b25d11d2b7b8f46e2d9584a33d9dc929bbb21" Nov 28 10:03:10 crc kubenswrapper[4838]: I1128 10:03:10.704708 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8874d4cd19f3eb967d9b738b3b8b25d11d2b7b8f46e2d9584a33d9dc929bbb21"} err="failed to get container status \"8874d4cd19f3eb967d9b738b3b8b25d11d2b7b8f46e2d9584a33d9dc929bbb21\": rpc error: code = NotFound desc = could not find container \"8874d4cd19f3eb967d9b738b3b8b25d11d2b7b8f46e2d9584a33d9dc929bbb21\": container with ID starting with 8874d4cd19f3eb967d9b738b3b8b25d11d2b7b8f46e2d9584a33d9dc929bbb21 not found: ID does not exist" Nov 28 
10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.018049 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5b9c6669fd-t6424"] Nov 28 10:03:11 crc kubenswrapper[4838]: E1128 10:03:11.018475 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d14771c4-48e3-4efe-a5f2-31331a30979f" containerName="extract-utilities" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.018497 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="d14771c4-48e3-4efe-a5f2-31331a30979f" containerName="extract-utilities" Nov 28 10:03:11 crc kubenswrapper[4838]: E1128 10:03:11.018516 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.018529 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 28 10:03:11 crc kubenswrapper[4838]: E1128 10:03:11.018551 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc483880-bf40-4f4c-bf77-52eb4896bd5b" containerName="controller-manager" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.018564 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc483880-bf40-4f4c-bf77-52eb4896bd5b" containerName="controller-manager" Nov 28 10:03:11 crc kubenswrapper[4838]: E1128 10:03:11.018585 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d14771c4-48e3-4efe-a5f2-31331a30979f" containerName="extract-content" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.018597 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="d14771c4-48e3-4efe-a5f2-31331a30979f" containerName="extract-content" Nov 28 10:03:11 crc kubenswrapper[4838]: E1128 10:03:11.018620 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b" containerName="installer" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.018633 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b" containerName="installer" Nov 28 10:03:11 crc kubenswrapper[4838]: E1128 10:03:11.018646 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d14771c4-48e3-4efe-a5f2-31331a30979f" containerName="registry-server" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.018658 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="d14771c4-48e3-4efe-a5f2-31331a30979f" containerName="registry-server" Nov 28 10:03:11 crc kubenswrapper[4838]: E1128 10:03:11.018679 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e35b5813-d7b9-4cbc-b002-44d465476046" containerName="registry-server" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.018690 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="e35b5813-d7b9-4cbc-b002-44d465476046" containerName="registry-server" Nov 28 10:03:11 crc kubenswrapper[4838]: E1128 10:03:11.018707 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74b81264-e855-4198-a063-9ef62eb9ad30" containerName="route-controller-manager" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.018745 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="74b81264-e855-4198-a063-9ef62eb9ad30" containerName="route-controller-manager" Nov 28 10:03:11 crc kubenswrapper[4838]: E1128 10:03:11.018760 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e35b5813-d7b9-4cbc-b002-44d465476046" 
containerName="extract-utilities" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.018771 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="e35b5813-d7b9-4cbc-b002-44d465476046" containerName="extract-utilities" Nov 28 10:03:11 crc kubenswrapper[4838]: E1128 10:03:11.018789 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e35b5813-d7b9-4cbc-b002-44d465476046" containerName="extract-content" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.018801 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="e35b5813-d7b9-4cbc-b002-44d465476046" containerName="extract-content" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.018994 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="d14771c4-48e3-4efe-a5f2-31331a30979f" containerName="registry-server" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.019016 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="37cf39e1-d5d9-4b5d-a80b-5f9e371ebf2b" containerName="installer" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.019036 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="74b81264-e855-4198-a063-9ef62eb9ad30" containerName="route-controller-manager" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.019053 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc483880-bf40-4f4c-bf77-52eb4896bd5b" containerName="controller-manager" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.019066 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="e35b5813-d7b9-4cbc-b002-44d465476046" containerName="registry-server" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.019085 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.019694 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.024054 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.024195 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.024672 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.025239 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.026785 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.028196 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.031630 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft"] Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.035503 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.048458 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.058002 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5b9c6669fd-t6424"] Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.087313 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.097642 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft"] Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.102109 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.102559 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.103918 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.104118 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.104473 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.153348 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/59190323-fcec-4e06-8cfa-0fdf052aa4f3-serving-cert\") pod \"controller-manager-5b9c6669fd-t6424\" (UID: \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\") " pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.153437 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/59190323-fcec-4e06-8cfa-0fdf052aa4f3-proxy-ca-bundles\") pod \"controller-manager-5b9c6669fd-t6424\" (UID: \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\") " pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.153467 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d465e42f-09ca-4a23-8984-7d6318adfa15-client-ca\") pod \"route-controller-manager-6b9fb897f9-dkfft\" (UID: \"d465e42f-09ca-4a23-8984-7d6318adfa15\") " pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.153489 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d465e42f-09ca-4a23-8984-7d6318adfa15-serving-cert\") pod \"route-controller-manager-6b9fb897f9-dkfft\" (UID: \"d465e42f-09ca-4a23-8984-7d6318adfa15\") " pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.153511 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7c5sp\" (UniqueName: \"kubernetes.io/projected/d465e42f-09ca-4a23-8984-7d6318adfa15-kube-api-access-7c5sp\") pod \"route-controller-manager-6b9fb897f9-dkfft\" (UID: \"d465e42f-09ca-4a23-8984-7d6318adfa15\") " pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.153542 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59190323-fcec-4e06-8cfa-0fdf052aa4f3-config\") pod \"controller-manager-5b9c6669fd-t6424\" (UID: \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\") " pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.153564 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/59190323-fcec-4e06-8cfa-0fdf052aa4f3-client-ca\") pod \"controller-manager-5b9c6669fd-t6424\" (UID: \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\") " pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.153586 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8xjc\" (UniqueName: \"kubernetes.io/projected/59190323-fcec-4e06-8cfa-0fdf052aa4f3-kube-api-access-r8xjc\") pod \"controller-manager-5b9c6669fd-t6424\" (UID: \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\") " pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.153613 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/d465e42f-09ca-4a23-8984-7d6318adfa15-config\") pod \"route-controller-manager-6b9fb897f9-dkfft\" (UID: \"d465e42f-09ca-4a23-8984-7d6318adfa15\") " pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.255360 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/59190323-fcec-4e06-8cfa-0fdf052aa4f3-proxy-ca-bundles\") pod \"controller-manager-5b9c6669fd-t6424\" (UID: \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\") " pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.255439 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d465e42f-09ca-4a23-8984-7d6318adfa15-client-ca\") pod \"route-controller-manager-6b9fb897f9-dkfft\" (UID: \"d465e42f-09ca-4a23-8984-7d6318adfa15\") " pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.255474 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7c5sp\" (UniqueName: \"kubernetes.io/projected/d465e42f-09ca-4a23-8984-7d6318adfa15-kube-api-access-7c5sp\") pod \"route-controller-manager-6b9fb897f9-dkfft\" (UID: \"d465e42f-09ca-4a23-8984-7d6318adfa15\") " pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.255502 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d465e42f-09ca-4a23-8984-7d6318adfa15-serving-cert\") pod \"route-controller-manager-6b9fb897f9-dkfft\" (UID: \"d465e42f-09ca-4a23-8984-7d6318adfa15\") " pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.255537 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59190323-fcec-4e06-8cfa-0fdf052aa4f3-config\") pod \"controller-manager-5b9c6669fd-t6424\" (UID: \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\") " pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.255563 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/59190323-fcec-4e06-8cfa-0fdf052aa4f3-client-ca\") pod \"controller-manager-5b9c6669fd-t6424\" (UID: \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\") " pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.255588 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8xjc\" (UniqueName: \"kubernetes.io/projected/59190323-fcec-4e06-8cfa-0fdf052aa4f3-kube-api-access-r8xjc\") pod \"controller-manager-5b9c6669fd-t6424\" (UID: \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\") " pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.255620 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d465e42f-09ca-4a23-8984-7d6318adfa15-config\") pod 
\"route-controller-manager-6b9fb897f9-dkfft\" (UID: \"d465e42f-09ca-4a23-8984-7d6318adfa15\") " pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.256662 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d465e42f-09ca-4a23-8984-7d6318adfa15-client-ca\") pod \"route-controller-manager-6b9fb897f9-dkfft\" (UID: \"d465e42f-09ca-4a23-8984-7d6318adfa15\") " pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.257352 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/59190323-fcec-4e06-8cfa-0fdf052aa4f3-client-ca\") pod \"controller-manager-5b9c6669fd-t6424\" (UID: \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\") " pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.257551 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d465e42f-09ca-4a23-8984-7d6318adfa15-config\") pod \"route-controller-manager-6b9fb897f9-dkfft\" (UID: \"d465e42f-09ca-4a23-8984-7d6318adfa15\") " pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.255665 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/59190323-fcec-4e06-8cfa-0fdf052aa4f3-serving-cert\") pod \"controller-manager-5b9c6669fd-t6424\" (UID: \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\") " pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.258022 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/59190323-fcec-4e06-8cfa-0fdf052aa4f3-proxy-ca-bundles\") pod \"controller-manager-5b9c6669fd-t6424\" (UID: \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\") " pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.258468 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59190323-fcec-4e06-8cfa-0fdf052aa4f3-config\") pod \"controller-manager-5b9c6669fd-t6424\" (UID: \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\") " pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.260837 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/59190323-fcec-4e06-8cfa-0fdf052aa4f3-serving-cert\") pod \"controller-manager-5b9c6669fd-t6424\" (UID: \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\") " pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.261215 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d465e42f-09ca-4a23-8984-7d6318adfa15-serving-cert\") pod \"route-controller-manager-6b9fb897f9-dkfft\" (UID: \"d465e42f-09ca-4a23-8984-7d6318adfa15\") " pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.280428 4838 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8xjc\" (UniqueName: \"kubernetes.io/projected/59190323-fcec-4e06-8cfa-0fdf052aa4f3-kube-api-access-r8xjc\") pod \"controller-manager-5b9c6669fd-t6424\" (UID: \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\") " pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.287370 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7c5sp\" (UniqueName: \"kubernetes.io/projected/d465e42f-09ca-4a23-8984-7d6318adfa15-kube-api-access-7c5sp\") pod \"route-controller-manager-6b9fb897f9-dkfft\" (UID: \"d465e42f-09ca-4a23-8984-7d6318adfa15\") " pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.392619 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.411897 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" Nov 28 10:03:11 crc kubenswrapper[4838]: W1128 10:03:11.677865 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod59190323_fcec_4e06_8cfa_0fdf052aa4f3.slice/crio-5548264fcc09705045bafc48659c4e12473358d970dfff0d5ac2758e2ca7b282 WatchSource:0}: Error finding container 5548264fcc09705045bafc48659c4e12473358d970dfff0d5ac2758e2ca7b282: Status 404 returned error can't find the container with id 5548264fcc09705045bafc48659c4e12473358d970dfff0d5ac2758e2ca7b282 Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.678902 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5b9c6669fd-t6424"] Nov 28 10:03:11 crc kubenswrapper[4838]: I1128 10:03:11.731246 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft"] Nov 28 10:03:12 crc kubenswrapper[4838]: I1128 10:03:12.567954 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74b81264-e855-4198-a063-9ef62eb9ad30" path="/var/lib/kubelet/pods/74b81264-e855-4198-a063-9ef62eb9ad30/volumes" Nov 28 10:03:12 crc kubenswrapper[4838]: I1128 10:03:12.568930 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc483880-bf40-4f4c-bf77-52eb4896bd5b" path="/var/lib/kubelet/pods/cc483880-bf40-4f4c-bf77-52eb4896bd5b/volumes" Nov 28 10:03:12 crc kubenswrapper[4838]: I1128 10:03:12.661316 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" event={"ID":"59190323-fcec-4e06-8cfa-0fdf052aa4f3","Type":"ContainerStarted","Data":"b4cd449f519aaec4f9c27f63962ba8cf2dc2abafbe14e591c30f8bf046770ca0"} Nov 28 10:03:12 crc kubenswrapper[4838]: I1128 10:03:12.661366 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" event={"ID":"59190323-fcec-4e06-8cfa-0fdf052aa4f3","Type":"ContainerStarted","Data":"5548264fcc09705045bafc48659c4e12473358d970dfff0d5ac2758e2ca7b282"} Nov 28 10:03:12 crc kubenswrapper[4838]: I1128 10:03:12.661523 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" 
Nov 28 10:03:12 crc kubenswrapper[4838]: I1128 10:03:12.663597 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" event={"ID":"d465e42f-09ca-4a23-8984-7d6318adfa15","Type":"ContainerStarted","Data":"8bd100c4b985977f47df2eca5efb7ce51ed706ad08c00d7ac6a9ab072f11a8a1"} Nov 28 10:03:12 crc kubenswrapper[4838]: I1128 10:03:12.663651 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" event={"ID":"d465e42f-09ca-4a23-8984-7d6318adfa15","Type":"ContainerStarted","Data":"a96db3c43b37a4e30008f9f5eb1e5b7a0c3f1560bab311ac0ab5440292ef6282"} Nov 28 10:03:12 crc kubenswrapper[4838]: I1128 10:03:12.663827 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" Nov 28 10:03:12 crc kubenswrapper[4838]: I1128 10:03:12.666281 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" Nov 28 10:03:12 crc kubenswrapper[4838]: I1128 10:03:12.668810 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" Nov 28 10:03:12 crc kubenswrapper[4838]: I1128 10:03:12.681382 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" podStartSLOduration=3.681366057 podStartE2EDuration="3.681366057s" podCreationTimestamp="2025-11-28 10:03:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:03:12.678003697 +0000 UTC m=+364.376977877" watchObservedRunningTime="2025-11-28 10:03:12.681366057 +0000 UTC m=+364.380340227" Nov 28 10:03:12 crc kubenswrapper[4838]: I1128 10:03:12.726471 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" podStartSLOduration=3.726455379 podStartE2EDuration="3.726455379s" podCreationTimestamp="2025-11-28 10:03:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:03:12.723746377 +0000 UTC m=+364.422720557" watchObservedRunningTime="2025-11-28 10:03:12.726455379 +0000 UTC m=+364.425429549" Nov 28 10:03:15 crc kubenswrapper[4838]: I1128 10:03:15.366692 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5b9c6669fd-t6424"] Nov 28 10:03:15 crc kubenswrapper[4838]: I1128 10:03:15.379448 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft"] Nov 28 10:03:15 crc kubenswrapper[4838]: I1128 10:03:15.682558 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" podUID="d465e42f-09ca-4a23-8984-7d6318adfa15" containerName="route-controller-manager" containerID="cri-o://8bd100c4b985977f47df2eca5efb7ce51ed706ad08c00d7ac6a9ab072f11a8a1" gracePeriod=30 Nov 28 10:03:15 crc kubenswrapper[4838]: I1128 10:03:15.682652 4838 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" podUID="59190323-fcec-4e06-8cfa-0fdf052aa4f3" containerName="controller-manager" containerID="cri-o://b4cd449f519aaec4f9c27f63962ba8cf2dc2abafbe14e591c30f8bf046770ca0" gracePeriod=30 Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.173041 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.179253 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.220418 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d465e42f-09ca-4a23-8984-7d6318adfa15-client-ca\") pod \"d465e42f-09ca-4a23-8984-7d6318adfa15\" (UID: \"d465e42f-09ca-4a23-8984-7d6318adfa15\") " Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.220559 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c5sp\" (UniqueName: \"kubernetes.io/projected/d465e42f-09ca-4a23-8984-7d6318adfa15-kube-api-access-7c5sp\") pod \"d465e42f-09ca-4a23-8984-7d6318adfa15\" (UID: \"d465e42f-09ca-4a23-8984-7d6318adfa15\") " Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.220588 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d465e42f-09ca-4a23-8984-7d6318adfa15-config\") pod \"d465e42f-09ca-4a23-8984-7d6318adfa15\" (UID: \"d465e42f-09ca-4a23-8984-7d6318adfa15\") " Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.220675 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d465e42f-09ca-4a23-8984-7d6318adfa15-serving-cert\") pod \"d465e42f-09ca-4a23-8984-7d6318adfa15\" (UID: \"d465e42f-09ca-4a23-8984-7d6318adfa15\") " Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.221828 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d465e42f-09ca-4a23-8984-7d6318adfa15-client-ca" (OuterVolumeSpecName: "client-ca") pod "d465e42f-09ca-4a23-8984-7d6318adfa15" (UID: "d465e42f-09ca-4a23-8984-7d6318adfa15"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.222279 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d465e42f-09ca-4a23-8984-7d6318adfa15-config" (OuterVolumeSpecName: "config") pod "d465e42f-09ca-4a23-8984-7d6318adfa15" (UID: "d465e42f-09ca-4a23-8984-7d6318adfa15"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.229945 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d465e42f-09ca-4a23-8984-7d6318adfa15-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d465e42f-09ca-4a23-8984-7d6318adfa15" (UID: "d465e42f-09ca-4a23-8984-7d6318adfa15"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.230966 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d465e42f-09ca-4a23-8984-7d6318adfa15-kube-api-access-7c5sp" (OuterVolumeSpecName: "kube-api-access-7c5sp") pod "d465e42f-09ca-4a23-8984-7d6318adfa15" (UID: "d465e42f-09ca-4a23-8984-7d6318adfa15"). InnerVolumeSpecName "kube-api-access-7c5sp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.321392 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/59190323-fcec-4e06-8cfa-0fdf052aa4f3-client-ca\") pod \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\" (UID: \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\") " Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.321479 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59190323-fcec-4e06-8cfa-0fdf052aa4f3-config\") pod \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\" (UID: \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\") " Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.321590 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/59190323-fcec-4e06-8cfa-0fdf052aa4f3-proxy-ca-bundles\") pod \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\" (UID: \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\") " Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.321665 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r8xjc\" (UniqueName: \"kubernetes.io/projected/59190323-fcec-4e06-8cfa-0fdf052aa4f3-kube-api-access-r8xjc\") pod \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\" (UID: \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\") " Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.321752 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/59190323-fcec-4e06-8cfa-0fdf052aa4f3-serving-cert\") pod \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\" (UID: \"59190323-fcec-4e06-8cfa-0fdf052aa4f3\") " Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.322113 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c5sp\" (UniqueName: \"kubernetes.io/projected/d465e42f-09ca-4a23-8984-7d6318adfa15-kube-api-access-7c5sp\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.322166 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d465e42f-09ca-4a23-8984-7d6318adfa15-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.322188 4838 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d465e42f-09ca-4a23-8984-7d6318adfa15-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.322209 4838 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d465e42f-09ca-4a23-8984-7d6318adfa15-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.322513 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59190323-fcec-4e06-8cfa-0fdf052aa4f3-client-ca" 
(OuterVolumeSpecName: "client-ca") pod "59190323-fcec-4e06-8cfa-0fdf052aa4f3" (UID: "59190323-fcec-4e06-8cfa-0fdf052aa4f3"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.322541 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59190323-fcec-4e06-8cfa-0fdf052aa4f3-config" (OuterVolumeSpecName: "config") pod "59190323-fcec-4e06-8cfa-0fdf052aa4f3" (UID: "59190323-fcec-4e06-8cfa-0fdf052aa4f3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.322962 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59190323-fcec-4e06-8cfa-0fdf052aa4f3-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "59190323-fcec-4e06-8cfa-0fdf052aa4f3" (UID: "59190323-fcec-4e06-8cfa-0fdf052aa4f3"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.325188 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59190323-fcec-4e06-8cfa-0fdf052aa4f3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "59190323-fcec-4e06-8cfa-0fdf052aa4f3" (UID: "59190323-fcec-4e06-8cfa-0fdf052aa4f3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.326045 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59190323-fcec-4e06-8cfa-0fdf052aa4f3-kube-api-access-r8xjc" (OuterVolumeSpecName: "kube-api-access-r8xjc") pod "59190323-fcec-4e06-8cfa-0fdf052aa4f3" (UID: "59190323-fcec-4e06-8cfa-0fdf052aa4f3"). InnerVolumeSpecName "kube-api-access-r8xjc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.424107 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r8xjc\" (UniqueName: \"kubernetes.io/projected/59190323-fcec-4e06-8cfa-0fdf052aa4f3-kube-api-access-r8xjc\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.424166 4838 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/59190323-fcec-4e06-8cfa-0fdf052aa4f3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.424192 4838 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/59190323-fcec-4e06-8cfa-0fdf052aa4f3-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.424215 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59190323-fcec-4e06-8cfa-0fdf052aa4f3-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.424231 4838 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/59190323-fcec-4e06-8cfa-0fdf052aa4f3-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.691217 4838 generic.go:334] "Generic (PLEG): container finished" podID="59190323-fcec-4e06-8cfa-0fdf052aa4f3" containerID="b4cd449f519aaec4f9c27f63962ba8cf2dc2abafbe14e591c30f8bf046770ca0" exitCode=0 Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.692262 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" event={"ID":"59190323-fcec-4e06-8cfa-0fdf052aa4f3","Type":"ContainerDied","Data":"b4cd449f519aaec4f9c27f63962ba8cf2dc2abafbe14e591c30f8bf046770ca0"} Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.692555 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" event={"ID":"59190323-fcec-4e06-8cfa-0fdf052aa4f3","Type":"ContainerDied","Data":"5548264fcc09705045bafc48659c4e12473358d970dfff0d5ac2758e2ca7b282"} Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.692831 4838 scope.go:117] "RemoveContainer" containerID="b4cd449f519aaec4f9c27f63962ba8cf2dc2abafbe14e591c30f8bf046770ca0" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.694001 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5b9c6669fd-t6424" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.699566 4838 generic.go:334] "Generic (PLEG): container finished" podID="d465e42f-09ca-4a23-8984-7d6318adfa15" containerID="8bd100c4b985977f47df2eca5efb7ce51ed706ad08c00d7ac6a9ab072f11a8a1" exitCode=0 Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.699625 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" event={"ID":"d465e42f-09ca-4a23-8984-7d6318adfa15","Type":"ContainerDied","Data":"8bd100c4b985977f47df2eca5efb7ce51ed706ad08c00d7ac6a9ab072f11a8a1"} Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.699662 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" event={"ID":"d465e42f-09ca-4a23-8984-7d6318adfa15","Type":"ContainerDied","Data":"a96db3c43b37a4e30008f9f5eb1e5b7a0c3f1560bab311ac0ab5440292ef6282"} Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.699795 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.721926 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5b9c6669fd-t6424"] Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.723857 4838 scope.go:117] "RemoveContainer" containerID="b4cd449f519aaec4f9c27f63962ba8cf2dc2abafbe14e591c30f8bf046770ca0" Nov 28 10:03:16 crc kubenswrapper[4838]: E1128 10:03:16.727333 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4cd449f519aaec4f9c27f63962ba8cf2dc2abafbe14e591c30f8bf046770ca0\": container with ID starting with b4cd449f519aaec4f9c27f63962ba8cf2dc2abafbe14e591c30f8bf046770ca0 not found: ID does not exist" containerID="b4cd449f519aaec4f9c27f63962ba8cf2dc2abafbe14e591c30f8bf046770ca0" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.727402 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4cd449f519aaec4f9c27f63962ba8cf2dc2abafbe14e591c30f8bf046770ca0"} err="failed to get container status \"b4cd449f519aaec4f9c27f63962ba8cf2dc2abafbe14e591c30f8bf046770ca0\": rpc error: code = NotFound desc = could not find container \"b4cd449f519aaec4f9c27f63962ba8cf2dc2abafbe14e591c30f8bf046770ca0\": container with ID starting with b4cd449f519aaec4f9c27f63962ba8cf2dc2abafbe14e591c30f8bf046770ca0 not found: ID does not exist" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.727441 4838 scope.go:117] "RemoveContainer" containerID="8bd100c4b985977f47df2eca5efb7ce51ed706ad08c00d7ac6a9ab072f11a8a1" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.730143 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-5b9c6669fd-t6424"] Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.737485 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft"] Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.744694 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b9fb897f9-dkfft"] Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.751507 4838 
scope.go:117] "RemoveContainer" containerID="8bd100c4b985977f47df2eca5efb7ce51ed706ad08c00d7ac6a9ab072f11a8a1" Nov 28 10:03:16 crc kubenswrapper[4838]: E1128 10:03:16.752225 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8bd100c4b985977f47df2eca5efb7ce51ed706ad08c00d7ac6a9ab072f11a8a1\": container with ID starting with 8bd100c4b985977f47df2eca5efb7ce51ed706ad08c00d7ac6a9ab072f11a8a1 not found: ID does not exist" containerID="8bd100c4b985977f47df2eca5efb7ce51ed706ad08c00d7ac6a9ab072f11a8a1" Nov 28 10:03:16 crc kubenswrapper[4838]: I1128 10:03:16.752491 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bd100c4b985977f47df2eca5efb7ce51ed706ad08c00d7ac6a9ab072f11a8a1"} err="failed to get container status \"8bd100c4b985977f47df2eca5efb7ce51ed706ad08c00d7ac6a9ab072f11a8a1\": rpc error: code = NotFound desc = could not find container \"8bd100c4b985977f47df2eca5efb7ce51ed706ad08c00d7ac6a9ab072f11a8a1\": container with ID starting with 8bd100c4b985977f47df2eca5efb7ce51ed706ad08c00d7ac6a9ab072f11a8a1 not found: ID does not exist" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.021664 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw"] Nov 28 10:03:17 crc kubenswrapper[4838]: E1128 10:03:17.022590 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59190323-fcec-4e06-8cfa-0fdf052aa4f3" containerName="controller-manager" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.022616 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="59190323-fcec-4e06-8cfa-0fdf052aa4f3" containerName="controller-manager" Nov 28 10:03:17 crc kubenswrapper[4838]: E1128 10:03:17.022656 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d465e42f-09ca-4a23-8984-7d6318adfa15" containerName="route-controller-manager" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.022669 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="d465e42f-09ca-4a23-8984-7d6318adfa15" containerName="route-controller-manager" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.022856 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="d465e42f-09ca-4a23-8984-7d6318adfa15" containerName="route-controller-manager" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.022881 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="59190323-fcec-4e06-8cfa-0fdf052aa4f3" containerName="controller-manager" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.023407 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.026268 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.026615 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.027352 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.028344 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.028806 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.029058 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.040613 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7"] Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.041901 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.044602 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.044857 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.045040 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.045142 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.045464 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.045523 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.045599 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.049913 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7"] Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.053964 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw"] Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.135317 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/ccb2f736-b88b-486a-9091-2d5ef579f1cb-serving-cert\") pod \"route-controller-manager-5495bb6774-p9mf7\" (UID: \"ccb2f736-b88b-486a-9091-2d5ef579f1cb\") " pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.135398 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3d21d238-2cf2-4b63-9fec-2babbc832c4f-client-ca\") pod \"controller-manager-7dc98d9d7-kdgkw\" (UID: \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\") " pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.135486 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ccb2f736-b88b-486a-9091-2d5ef579f1cb-client-ca\") pod \"route-controller-manager-5495bb6774-p9mf7\" (UID: \"ccb2f736-b88b-486a-9091-2d5ef579f1cb\") " pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.135549 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3d21d238-2cf2-4b63-9fec-2babbc832c4f-serving-cert\") pod \"controller-manager-7dc98d9d7-kdgkw\" (UID: \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\") " pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.135577 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3d21d238-2cf2-4b63-9fec-2babbc832c4f-proxy-ca-bundles\") pod \"controller-manager-7dc98d9d7-kdgkw\" (UID: \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\") " pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.135605 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d21d238-2cf2-4b63-9fec-2babbc832c4f-config\") pod \"controller-manager-7dc98d9d7-kdgkw\" (UID: \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\") " pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.135628 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjxkk\" (UniqueName: \"kubernetes.io/projected/ccb2f736-b88b-486a-9091-2d5ef579f1cb-kube-api-access-zjxkk\") pod \"route-controller-manager-5495bb6774-p9mf7\" (UID: \"ccb2f736-b88b-486a-9091-2d5ef579f1cb\") " pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.135662 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bls6k\" (UniqueName: \"kubernetes.io/projected/3d21d238-2cf2-4b63-9fec-2babbc832c4f-kube-api-access-bls6k\") pod \"controller-manager-7dc98d9d7-kdgkw\" (UID: \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\") " pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.135746 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/ccb2f736-b88b-486a-9091-2d5ef579f1cb-config\") pod \"route-controller-manager-5495bb6774-p9mf7\" (UID: \"ccb2f736-b88b-486a-9091-2d5ef579f1cb\") " pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.237066 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3d21d238-2cf2-4b63-9fec-2babbc832c4f-client-ca\") pod \"controller-manager-7dc98d9d7-kdgkw\" (UID: \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\") " pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.237114 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ccb2f736-b88b-486a-9091-2d5ef579f1cb-client-ca\") pod \"route-controller-manager-5495bb6774-p9mf7\" (UID: \"ccb2f736-b88b-486a-9091-2d5ef579f1cb\") " pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.237150 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3d21d238-2cf2-4b63-9fec-2babbc832c4f-serving-cert\") pod \"controller-manager-7dc98d9d7-kdgkw\" (UID: \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\") " pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.237169 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3d21d238-2cf2-4b63-9fec-2babbc832c4f-proxy-ca-bundles\") pod \"controller-manager-7dc98d9d7-kdgkw\" (UID: \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\") " pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.237199 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d21d238-2cf2-4b63-9fec-2babbc832c4f-config\") pod \"controller-manager-7dc98d9d7-kdgkw\" (UID: \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\") " pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.237218 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjxkk\" (UniqueName: \"kubernetes.io/projected/ccb2f736-b88b-486a-9091-2d5ef579f1cb-kube-api-access-zjxkk\") pod \"route-controller-manager-5495bb6774-p9mf7\" (UID: \"ccb2f736-b88b-486a-9091-2d5ef579f1cb\") " pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.237246 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bls6k\" (UniqueName: \"kubernetes.io/projected/3d21d238-2cf2-4b63-9fec-2babbc832c4f-kube-api-access-bls6k\") pod \"controller-manager-7dc98d9d7-kdgkw\" (UID: \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\") " pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.237266 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccb2f736-b88b-486a-9091-2d5ef579f1cb-config\") pod \"route-controller-manager-5495bb6774-p9mf7\" (UID: 
\"ccb2f736-b88b-486a-9091-2d5ef579f1cb\") " pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.237284 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ccb2f736-b88b-486a-9091-2d5ef579f1cb-serving-cert\") pod \"route-controller-manager-5495bb6774-p9mf7\" (UID: \"ccb2f736-b88b-486a-9091-2d5ef579f1cb\") " pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.238660 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccb2f736-b88b-486a-9091-2d5ef579f1cb-config\") pod \"route-controller-manager-5495bb6774-p9mf7\" (UID: \"ccb2f736-b88b-486a-9091-2d5ef579f1cb\") " pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.238761 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d21d238-2cf2-4b63-9fec-2babbc832c4f-config\") pod \"controller-manager-7dc98d9d7-kdgkw\" (UID: \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\") " pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.239195 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ccb2f736-b88b-486a-9091-2d5ef579f1cb-client-ca\") pod \"route-controller-manager-5495bb6774-p9mf7\" (UID: \"ccb2f736-b88b-486a-9091-2d5ef579f1cb\") " pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.239281 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3d21d238-2cf2-4b63-9fec-2babbc832c4f-client-ca\") pod \"controller-manager-7dc98d9d7-kdgkw\" (UID: \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\") " pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.239807 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3d21d238-2cf2-4b63-9fec-2babbc832c4f-proxy-ca-bundles\") pod \"controller-manager-7dc98d9d7-kdgkw\" (UID: \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\") " pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.243042 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3d21d238-2cf2-4b63-9fec-2babbc832c4f-serving-cert\") pod \"controller-manager-7dc98d9d7-kdgkw\" (UID: \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\") " pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.243168 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ccb2f736-b88b-486a-9091-2d5ef579f1cb-serving-cert\") pod \"route-controller-manager-5495bb6774-p9mf7\" (UID: \"ccb2f736-b88b-486a-9091-2d5ef579f1cb\") " pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.258263 4838 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-bls6k\" (UniqueName: \"kubernetes.io/projected/3d21d238-2cf2-4b63-9fec-2babbc832c4f-kube-api-access-bls6k\") pod \"controller-manager-7dc98d9d7-kdgkw\" (UID: \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\") " pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.267773 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjxkk\" (UniqueName: \"kubernetes.io/projected/ccb2f736-b88b-486a-9091-2d5ef579f1cb-kube-api-access-zjxkk\") pod \"route-controller-manager-5495bb6774-p9mf7\" (UID: \"ccb2f736-b88b-486a-9091-2d5ef579f1cb\") " pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.340523 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.392601 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.600939 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw"] Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.638748 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7"] Nov 28 10:03:17 crc kubenswrapper[4838]: W1128 10:03:17.642931 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podccb2f736_b88b_486a_9091_2d5ef579f1cb.slice/crio-6a3d34406e0533cce4fe900d905c2f1ee26711e0aeee44f433810832e8a57602 WatchSource:0}: Error finding container 6a3d34406e0533cce4fe900d905c2f1ee26711e0aeee44f433810832e8a57602: Status 404 returned error can't find the container with id 6a3d34406e0533cce4fe900d905c2f1ee26711e0aeee44f433810832e8a57602 Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.711338 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" event={"ID":"ccb2f736-b88b-486a-9091-2d5ef579f1cb","Type":"ContainerStarted","Data":"6a3d34406e0533cce4fe900d905c2f1ee26711e0aeee44f433810832e8a57602"} Nov 28 10:03:17 crc kubenswrapper[4838]: I1128 10:03:17.712409 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" event={"ID":"3d21d238-2cf2-4b63-9fec-2babbc832c4f","Type":"ContainerStarted","Data":"ec80a4acc13c4222900a584fd2d1fe6b55284257f44e1b176c2b649a0323d75f"} Nov 28 10:03:18 crc kubenswrapper[4838]: I1128 10:03:18.578068 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59190323-fcec-4e06-8cfa-0fdf052aa4f3" path="/var/lib/kubelet/pods/59190323-fcec-4e06-8cfa-0fdf052aa4f3/volumes" Nov 28 10:03:18 crc kubenswrapper[4838]: I1128 10:03:18.579106 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d465e42f-09ca-4a23-8984-7d6318adfa15" path="/var/lib/kubelet/pods/d465e42f-09ca-4a23-8984-7d6318adfa15/volumes" Nov 28 10:03:18 crc kubenswrapper[4838]: I1128 10:03:18.718408 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" 
event={"ID":"ccb2f736-b88b-486a-9091-2d5ef579f1cb","Type":"ContainerStarted","Data":"cdb0b7da439ca1cd9a197a0750435e3d5abf3f40418174b26b9755d3f98616e5"} Nov 28 10:03:18 crc kubenswrapper[4838]: I1128 10:03:18.719140 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" Nov 28 10:03:18 crc kubenswrapper[4838]: I1128 10:03:18.720979 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" event={"ID":"3d21d238-2cf2-4b63-9fec-2babbc832c4f","Type":"ContainerStarted","Data":"5a854e952c6c657be661d2710d52246df8873b12ea47a750c25bea6137b102b7"} Nov 28 10:03:18 crc kubenswrapper[4838]: I1128 10:03:18.721212 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" Nov 28 10:03:18 crc kubenswrapper[4838]: I1128 10:03:18.726543 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" Nov 28 10:03:18 crc kubenswrapper[4838]: I1128 10:03:18.726823 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" Nov 28 10:03:18 crc kubenswrapper[4838]: I1128 10:03:18.739876 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" podStartSLOduration=3.739852748 podStartE2EDuration="3.739852748s" podCreationTimestamp="2025-11-28 10:03:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:03:18.736727884 +0000 UTC m=+370.435702074" watchObservedRunningTime="2025-11-28 10:03:18.739852748 +0000 UTC m=+370.438826958" Nov 28 10:03:18 crc kubenswrapper[4838]: I1128 10:03:18.758482 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" podStartSLOduration=3.758462079 podStartE2EDuration="3.758462079s" podCreationTimestamp="2025-11-28 10:03:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:03:18.754767599 +0000 UTC m=+370.453741799" watchObservedRunningTime="2025-11-28 10:03:18.758462079 +0000 UTC m=+370.457436249" Nov 28 10:03:29 crc kubenswrapper[4838]: I1128 10:03:29.780588 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7"] Nov 28 10:03:29 crc kubenswrapper[4838]: I1128 10:03:29.781515 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" podUID="ccb2f736-b88b-486a-9091-2d5ef579f1cb" containerName="route-controller-manager" containerID="cri-o://cdb0b7da439ca1cd9a197a0750435e3d5abf3f40418174b26b9755d3f98616e5" gracePeriod=30 Nov 28 10:03:30 crc kubenswrapper[4838]: I1128 10:03:30.243818 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" Nov 28 10:03:30 crc kubenswrapper[4838]: I1128 10:03:30.315337 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ccb2f736-b88b-486a-9091-2d5ef579f1cb-serving-cert\") pod \"ccb2f736-b88b-486a-9091-2d5ef579f1cb\" (UID: \"ccb2f736-b88b-486a-9091-2d5ef579f1cb\") " Nov 28 10:03:30 crc kubenswrapper[4838]: I1128 10:03:30.316228 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ccb2f736-b88b-486a-9091-2d5ef579f1cb-client-ca\") pod \"ccb2f736-b88b-486a-9091-2d5ef579f1cb\" (UID: \"ccb2f736-b88b-486a-9091-2d5ef579f1cb\") " Nov 28 10:03:30 crc kubenswrapper[4838]: I1128 10:03:30.316303 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccb2f736-b88b-486a-9091-2d5ef579f1cb-config\") pod \"ccb2f736-b88b-486a-9091-2d5ef579f1cb\" (UID: \"ccb2f736-b88b-486a-9091-2d5ef579f1cb\") " Nov 28 10:03:30 crc kubenswrapper[4838]: I1128 10:03:30.316343 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjxkk\" (UniqueName: \"kubernetes.io/projected/ccb2f736-b88b-486a-9091-2d5ef579f1cb-kube-api-access-zjxkk\") pod \"ccb2f736-b88b-486a-9091-2d5ef579f1cb\" (UID: \"ccb2f736-b88b-486a-9091-2d5ef579f1cb\") " Nov 28 10:03:30 crc kubenswrapper[4838]: I1128 10:03:30.316944 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ccb2f736-b88b-486a-9091-2d5ef579f1cb-client-ca" (OuterVolumeSpecName: "client-ca") pod "ccb2f736-b88b-486a-9091-2d5ef579f1cb" (UID: "ccb2f736-b88b-486a-9091-2d5ef579f1cb"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:03:30 crc kubenswrapper[4838]: I1128 10:03:30.317135 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ccb2f736-b88b-486a-9091-2d5ef579f1cb-config" (OuterVolumeSpecName: "config") pod "ccb2f736-b88b-486a-9091-2d5ef579f1cb" (UID: "ccb2f736-b88b-486a-9091-2d5ef579f1cb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:03:30 crc kubenswrapper[4838]: I1128 10:03:30.322871 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ccb2f736-b88b-486a-9091-2d5ef579f1cb-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "ccb2f736-b88b-486a-9091-2d5ef579f1cb" (UID: "ccb2f736-b88b-486a-9091-2d5ef579f1cb"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:03:30 crc kubenswrapper[4838]: I1128 10:03:30.339915 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ccb2f736-b88b-486a-9091-2d5ef579f1cb-kube-api-access-zjxkk" (OuterVolumeSpecName: "kube-api-access-zjxkk") pod "ccb2f736-b88b-486a-9091-2d5ef579f1cb" (UID: "ccb2f736-b88b-486a-9091-2d5ef579f1cb"). InnerVolumeSpecName "kube-api-access-zjxkk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:03:30 crc kubenswrapper[4838]: I1128 10:03:30.417373 4838 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ccb2f736-b88b-486a-9091-2d5ef579f1cb-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:30 crc kubenswrapper[4838]: I1128 10:03:30.417710 4838 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ccb2f736-b88b-486a-9091-2d5ef579f1cb-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:30 crc kubenswrapper[4838]: I1128 10:03:30.417739 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccb2f736-b88b-486a-9091-2d5ef579f1cb-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:30 crc kubenswrapper[4838]: I1128 10:03:30.417751 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjxkk\" (UniqueName: \"kubernetes.io/projected/ccb2f736-b88b-486a-9091-2d5ef579f1cb-kube-api-access-zjxkk\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:30 crc kubenswrapper[4838]: I1128 10:03:30.806958 4838 generic.go:334] "Generic (PLEG): container finished" podID="ccb2f736-b88b-486a-9091-2d5ef579f1cb" containerID="cdb0b7da439ca1cd9a197a0750435e3d5abf3f40418174b26b9755d3f98616e5" exitCode=0 Nov 28 10:03:30 crc kubenswrapper[4838]: I1128 10:03:30.806994 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" event={"ID":"ccb2f736-b88b-486a-9091-2d5ef579f1cb","Type":"ContainerDied","Data":"cdb0b7da439ca1cd9a197a0750435e3d5abf3f40418174b26b9755d3f98616e5"} Nov 28 10:03:30 crc kubenswrapper[4838]: I1128 10:03:30.807043 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" event={"ID":"ccb2f736-b88b-486a-9091-2d5ef579f1cb","Type":"ContainerDied","Data":"6a3d34406e0533cce4fe900d905c2f1ee26711e0aeee44f433810832e8a57602"} Nov 28 10:03:30 crc kubenswrapper[4838]: I1128 10:03:30.807067 4838 scope.go:117] "RemoveContainer" containerID="cdb0b7da439ca1cd9a197a0750435e3d5abf3f40418174b26b9755d3f98616e5" Nov 28 10:03:30 crc kubenswrapper[4838]: I1128 10:03:30.807078 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7" Nov 28 10:03:30 crc kubenswrapper[4838]: I1128 10:03:30.822681 4838 scope.go:117] "RemoveContainer" containerID="cdb0b7da439ca1cd9a197a0750435e3d5abf3f40418174b26b9755d3f98616e5" Nov 28 10:03:30 crc kubenswrapper[4838]: E1128 10:03:30.823256 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cdb0b7da439ca1cd9a197a0750435e3d5abf3f40418174b26b9755d3f98616e5\": container with ID starting with cdb0b7da439ca1cd9a197a0750435e3d5abf3f40418174b26b9755d3f98616e5 not found: ID does not exist" containerID="cdb0b7da439ca1cd9a197a0750435e3d5abf3f40418174b26b9755d3f98616e5" Nov 28 10:03:30 crc kubenswrapper[4838]: I1128 10:03:30.823357 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdb0b7da439ca1cd9a197a0750435e3d5abf3f40418174b26b9755d3f98616e5"} err="failed to get container status \"cdb0b7da439ca1cd9a197a0750435e3d5abf3f40418174b26b9755d3f98616e5\": rpc error: code = NotFound desc = could not find container \"cdb0b7da439ca1cd9a197a0750435e3d5abf3f40418174b26b9755d3f98616e5\": container with ID starting with cdb0b7da439ca1cd9a197a0750435e3d5abf3f40418174b26b9755d3f98616e5 not found: ID does not exist" Nov 28 10:03:30 crc kubenswrapper[4838]: I1128 10:03:30.831786 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7"] Nov 28 10:03:30 crc kubenswrapper[4838]: I1128 10:03:30.836564 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5495bb6774-p9mf7"] Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.026653 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-595f487d49-jqwld"] Nov 28 10:03:31 crc kubenswrapper[4838]: E1128 10:03:31.026991 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccb2f736-b88b-486a-9091-2d5ef579f1cb" containerName="route-controller-manager" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.027015 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccb2f736-b88b-486a-9091-2d5ef579f1cb" containerName="route-controller-manager" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.027233 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccb2f736-b88b-486a-9091-2d5ef579f1cb" containerName="route-controller-manager" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.027981 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-595f487d49-jqwld" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.032843 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.033650 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.034167 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.034449 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.047914 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-595f487d49-jqwld"] Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.078401 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.078671 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.126795 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h9tbr\" (UniqueName: \"kubernetes.io/projected/ac432f26-e7df-4179-b18e-24e09acf5e68-kube-api-access-h9tbr\") pod \"route-controller-manager-595f487d49-jqwld\" (UID: \"ac432f26-e7df-4179-b18e-24e09acf5e68\") " pod="openshift-route-controller-manager/route-controller-manager-595f487d49-jqwld" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.126864 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ac432f26-e7df-4179-b18e-24e09acf5e68-serving-cert\") pod \"route-controller-manager-595f487d49-jqwld\" (UID: \"ac432f26-e7df-4179-b18e-24e09acf5e68\") " pod="openshift-route-controller-manager/route-controller-manager-595f487d49-jqwld" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.126906 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ac432f26-e7df-4179-b18e-24e09acf5e68-config\") pod \"route-controller-manager-595f487d49-jqwld\" (UID: \"ac432f26-e7df-4179-b18e-24e09acf5e68\") " pod="openshift-route-controller-manager/route-controller-manager-595f487d49-jqwld" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.126933 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ac432f26-e7df-4179-b18e-24e09acf5e68-client-ca\") pod \"route-controller-manager-595f487d49-jqwld\" (UID: \"ac432f26-e7df-4179-b18e-24e09acf5e68\") " pod="openshift-route-controller-manager/route-controller-manager-595f487d49-jqwld" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.228533 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ac432f26-e7df-4179-b18e-24e09acf5e68-serving-cert\") pod 
\"route-controller-manager-595f487d49-jqwld\" (UID: \"ac432f26-e7df-4179-b18e-24e09acf5e68\") " pod="openshift-route-controller-manager/route-controller-manager-595f487d49-jqwld" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.228637 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ac432f26-e7df-4179-b18e-24e09acf5e68-config\") pod \"route-controller-manager-595f487d49-jqwld\" (UID: \"ac432f26-e7df-4179-b18e-24e09acf5e68\") " pod="openshift-route-controller-manager/route-controller-manager-595f487d49-jqwld" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.228678 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ac432f26-e7df-4179-b18e-24e09acf5e68-client-ca\") pod \"route-controller-manager-595f487d49-jqwld\" (UID: \"ac432f26-e7df-4179-b18e-24e09acf5e68\") " pod="openshift-route-controller-manager/route-controller-manager-595f487d49-jqwld" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.228992 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h9tbr\" (UniqueName: \"kubernetes.io/projected/ac432f26-e7df-4179-b18e-24e09acf5e68-kube-api-access-h9tbr\") pod \"route-controller-manager-595f487d49-jqwld\" (UID: \"ac432f26-e7df-4179-b18e-24e09acf5e68\") " pod="openshift-route-controller-manager/route-controller-manager-595f487d49-jqwld" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.230402 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ac432f26-e7df-4179-b18e-24e09acf5e68-client-ca\") pod \"route-controller-manager-595f487d49-jqwld\" (UID: \"ac432f26-e7df-4179-b18e-24e09acf5e68\") " pod="openshift-route-controller-manager/route-controller-manager-595f487d49-jqwld" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.230906 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ac432f26-e7df-4179-b18e-24e09acf5e68-config\") pod \"route-controller-manager-595f487d49-jqwld\" (UID: \"ac432f26-e7df-4179-b18e-24e09acf5e68\") " pod="openshift-route-controller-manager/route-controller-manager-595f487d49-jqwld" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.234787 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ac432f26-e7df-4179-b18e-24e09acf5e68-serving-cert\") pod \"route-controller-manager-595f487d49-jqwld\" (UID: \"ac432f26-e7df-4179-b18e-24e09acf5e68\") " pod="openshift-route-controller-manager/route-controller-manager-595f487d49-jqwld" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.260327 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h9tbr\" (UniqueName: \"kubernetes.io/projected/ac432f26-e7df-4179-b18e-24e09acf5e68-kube-api-access-h9tbr\") pod \"route-controller-manager-595f487d49-jqwld\" (UID: \"ac432f26-e7df-4179-b18e-24e09acf5e68\") " pod="openshift-route-controller-manager/route-controller-manager-595f487d49-jqwld" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.414831 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-595f487d49-jqwld" Nov 28 10:03:31 crc kubenswrapper[4838]: I1128 10:03:31.864803 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-595f487d49-jqwld"] Nov 28 10:03:31 crc kubenswrapper[4838]: W1128 10:03:31.866745 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podac432f26_e7df_4179_b18e_24e09acf5e68.slice/crio-07719e10fbf4ce25d4db8a68f0dbfe25aa81243a9a62d17f35d7cf37452932b7 WatchSource:0}: Error finding container 07719e10fbf4ce25d4db8a68f0dbfe25aa81243a9a62d17f35d7cf37452932b7: Status 404 returned error can't find the container with id 07719e10fbf4ce25d4db8a68f0dbfe25aa81243a9a62d17f35d7cf37452932b7 Nov 28 10:03:32 crc kubenswrapper[4838]: I1128 10:03:32.568521 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ccb2f736-b88b-486a-9091-2d5ef579f1cb" path="/var/lib/kubelet/pods/ccb2f736-b88b-486a-9091-2d5ef579f1cb/volumes" Nov 28 10:03:32 crc kubenswrapper[4838]: I1128 10:03:32.819509 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-595f487d49-jqwld" event={"ID":"ac432f26-e7df-4179-b18e-24e09acf5e68","Type":"ContainerStarted","Data":"afac999928cdd0ad2b473fb80f0ff7fc41e5caa3bd8c037974458e562313e7dd"} Nov 28 10:03:32 crc kubenswrapper[4838]: I1128 10:03:32.819558 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-595f487d49-jqwld" event={"ID":"ac432f26-e7df-4179-b18e-24e09acf5e68","Type":"ContainerStarted","Data":"07719e10fbf4ce25d4db8a68f0dbfe25aa81243a9a62d17f35d7cf37452932b7"} Nov 28 10:03:32 crc kubenswrapper[4838]: I1128 10:03:32.819781 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-595f487d49-jqwld" Nov 28 10:03:32 crc kubenswrapper[4838]: I1128 10:03:32.824787 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-595f487d49-jqwld" Nov 28 10:03:32 crc kubenswrapper[4838]: I1128 10:03:32.840848 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-595f487d49-jqwld" podStartSLOduration=3.840819228 podStartE2EDuration="3.840819228s" podCreationTimestamp="2025-11-28 10:03:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:03:32.835332241 +0000 UTC m=+384.534306431" watchObservedRunningTime="2025-11-28 10:03:32.840819228 +0000 UTC m=+384.539793488" Nov 28 10:03:37 crc kubenswrapper[4838]: I1128 10:03:37.970963 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-n5fj2"] Nov 28 10:03:37 crc kubenswrapper[4838]: I1128 10:03:37.972059 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-n5fj2" podUID="d8b6f770-e994-4bf8-92de-7e359cbe75a8" containerName="registry-server" containerID="cri-o://5e6b8d3df090431f382ebb130cd462a38463f6742c3215993268d50c4fe52e50" gracePeriod=2 Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.276796 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/certified-operators-sdvg7"] Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.277017 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-sdvg7" podUID="e1f1e017-546a-4f0b-965e-bd050ad48e44" containerName="registry-server" containerID="cri-o://9130cd5998cc69dcf460ea6a726b4f0a2dbe0ba992f14901e0bec996820573e9" gracePeriod=30 Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.282420 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6gkhp"] Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.282703 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6gkhp" podUID="0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0" containerName="registry-server" containerID="cri-o://c7f193b4ec4568793495fffecdd0e3503720c269ac15d45d3716d5b6853b6d50" gracePeriod=30 Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.294549 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-xsjrl"] Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.296642 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl" podUID="8a281de3-cc12-4dd1-b9be-0ca03a0613ec" containerName="marketplace-operator" containerID="cri-o://6e26a372f47f409cbd328fe79d285e7b58ca7123f6fff591bcefa8d90affd22a" gracePeriod=30 Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.301869 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vmqxd"] Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.302324 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vmqxd" podUID="bf9fc775-df38-4de1-b17a-d093a477938a" containerName="registry-server" containerID="cri-o://f008e458bc1dca0a2781deea356d38d890938c45045c1466a681fff535226ead" gracePeriod=30 Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.313320 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mcgzz"] Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.313571 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mcgzz" podUID="ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" containerName="registry-server" containerID="cri-o://0c552617179b3cea76f22cfdaf8d852af57ced4aabea154b74088bf2fe6615b0" gracePeriod=30 Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.320181 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-6mmw4"] Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.320840 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-6mmw4" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.372509 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-6mmw4"] Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.418702 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b0c9680e-7b0a-47a9-87dc-4da8cfbfce77-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-6mmw4\" (UID: \"b0c9680e-7b0a-47a9-87dc-4da8cfbfce77\") " pod="openshift-marketplace/marketplace-operator-79b997595-6mmw4" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.418782 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b0c9680e-7b0a-47a9-87dc-4da8cfbfce77-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-6mmw4\" (UID: \"b0c9680e-7b0a-47a9-87dc-4da8cfbfce77\") " pod="openshift-marketplace/marketplace-operator-79b997595-6mmw4" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.418835 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2bvk\" (UniqueName: \"kubernetes.io/projected/b0c9680e-7b0a-47a9-87dc-4da8cfbfce77-kube-api-access-z2bvk\") pod \"marketplace-operator-79b997595-6mmw4\" (UID: \"b0c9680e-7b0a-47a9-87dc-4da8cfbfce77\") " pod="openshift-marketplace/marketplace-operator-79b997595-6mmw4" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.519825 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b0c9680e-7b0a-47a9-87dc-4da8cfbfce77-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-6mmw4\" (UID: \"b0c9680e-7b0a-47a9-87dc-4da8cfbfce77\") " pod="openshift-marketplace/marketplace-operator-79b997595-6mmw4" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.519926 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b0c9680e-7b0a-47a9-87dc-4da8cfbfce77-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-6mmw4\" (UID: \"b0c9680e-7b0a-47a9-87dc-4da8cfbfce77\") " pod="openshift-marketplace/marketplace-operator-79b997595-6mmw4" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.519979 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2bvk\" (UniqueName: \"kubernetes.io/projected/b0c9680e-7b0a-47a9-87dc-4da8cfbfce77-kube-api-access-z2bvk\") pod \"marketplace-operator-79b997595-6mmw4\" (UID: \"b0c9680e-7b0a-47a9-87dc-4da8cfbfce77\") " pod="openshift-marketplace/marketplace-operator-79b997595-6mmw4" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.521574 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b0c9680e-7b0a-47a9-87dc-4da8cfbfce77-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-6mmw4\" (UID: \"b0c9680e-7b0a-47a9-87dc-4da8cfbfce77\") " pod="openshift-marketplace/marketplace-operator-79b997595-6mmw4" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.533308 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/b0c9680e-7b0a-47a9-87dc-4da8cfbfce77-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-6mmw4\" (UID: \"b0c9680e-7b0a-47a9-87dc-4da8cfbfce77\") " pod="openshift-marketplace/marketplace-operator-79b997595-6mmw4" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.542085 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2bvk\" (UniqueName: \"kubernetes.io/projected/b0c9680e-7b0a-47a9-87dc-4da8cfbfce77-kube-api-access-z2bvk\") pod \"marketplace-operator-79b997595-6mmw4\" (UID: \"b0c9680e-7b0a-47a9-87dc-4da8cfbfce77\") " pod="openshift-marketplace/marketplace-operator-79b997595-6mmw4" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.591374 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-kzkzk"] Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.592059 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.608914 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-kzkzk"] Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.621194 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdkcq\" (UniqueName: \"kubernetes.io/projected/f55a8232-4e90-405d-846a-d68ac28dd002-kube-api-access-sdkcq\") pod \"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.621347 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f55a8232-4e90-405d-846a-d68ac28dd002-trusted-ca\") pod \"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.621487 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/f55a8232-4e90-405d-846a-d68ac28dd002-registry-certificates\") pod \"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.621572 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f55a8232-4e90-405d-846a-d68ac28dd002-bound-sa-token\") pod \"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.621652 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.621761 4838 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/f55a8232-4e90-405d-846a-d68ac28dd002-registry-tls\") pod \"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.621903 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/f55a8232-4e90-405d-846a-d68ac28dd002-installation-pull-secrets\") pod \"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.621981 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/f55a8232-4e90-405d-846a-d68ac28dd002-ca-trust-extracted\") pod \"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.643538 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-6mmw4" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.651270 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.737159 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/f55a8232-4e90-405d-846a-d68ac28dd002-registry-tls\") pod \"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.737256 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/f55a8232-4e90-405d-846a-d68ac28dd002-installation-pull-secrets\") pod \"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.737281 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/f55a8232-4e90-405d-846a-d68ac28dd002-ca-trust-extracted\") pod \"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.737303 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sdkcq\" (UniqueName: \"kubernetes.io/projected/f55a8232-4e90-405d-846a-d68ac28dd002-kube-api-access-sdkcq\") pod \"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 
10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.737322 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f55a8232-4e90-405d-846a-d68ac28dd002-trusted-ca\") pod \"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.737348 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/f55a8232-4e90-405d-846a-d68ac28dd002-registry-certificates\") pod \"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.737378 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f55a8232-4e90-405d-846a-d68ac28dd002-bound-sa-token\") pod \"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.741977 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/f55a8232-4e90-405d-846a-d68ac28dd002-registry-tls\") pod \"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.742768 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/f55a8232-4e90-405d-846a-d68ac28dd002-ca-trust-extracted\") pod \"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.743243 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f55a8232-4e90-405d-846a-d68ac28dd002-trusted-ca\") pod \"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.743694 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/f55a8232-4e90-405d-846a-d68ac28dd002-registry-certificates\") pod \"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.744935 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/f55a8232-4e90-405d-846a-d68ac28dd002-installation-pull-secrets\") pod \"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.752812 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f55a8232-4e90-405d-846a-d68ac28dd002-bound-sa-token\") pod 
\"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:38 crc kubenswrapper[4838]: I1128 10:03:38.757046 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdkcq\" (UniqueName: \"kubernetes.io/projected/f55a8232-4e90-405d-846a-d68ac28dd002-kube-api-access-sdkcq\") pod \"image-registry-66df7c8f76-kzkzk\" (UID: \"f55a8232-4e90-405d-846a-d68ac28dd002\") " pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.192169 4838 generic.go:334] "Generic (PLEG): container finished" podID="0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0" containerID="c7f193b4ec4568793495fffecdd0e3503720c269ac15d45d3716d5b6853b6d50" exitCode=0 Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.192622 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gkhp" event={"ID":"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0","Type":"ContainerDied","Data":"c7f193b4ec4568793495fffecdd0e3503720c269ac15d45d3716d5b6853b6d50"} Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.206152 4838 generic.go:334] "Generic (PLEG): container finished" podID="ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" containerID="0c552617179b3cea76f22cfdaf8d852af57ced4aabea154b74088bf2fe6615b0" exitCode=0 Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.206220 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mcgzz" event={"ID":"ac5ded1f-10ca-4db6-b3a6-80f30f28cb34","Type":"ContainerDied","Data":"0c552617179b3cea76f22cfdaf8d852af57ced4aabea154b74088bf2fe6615b0"} Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.213808 4838 generic.go:334] "Generic (PLEG): container finished" podID="8a281de3-cc12-4dd1-b9be-0ca03a0613ec" containerID="6e26a372f47f409cbd328fe79d285e7b58ca7123f6fff591bcefa8d90affd22a" exitCode=0 Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.213858 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl" event={"ID":"8a281de3-cc12-4dd1-b9be-0ca03a0613ec","Type":"ContainerDied","Data":"6e26a372f47f409cbd328fe79d285e7b58ca7123f6fff591bcefa8d90affd22a"} Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.223826 4838 generic.go:334] "Generic (PLEG): container finished" podID="bf9fc775-df38-4de1-b17a-d093a477938a" containerID="f008e458bc1dca0a2781deea356d38d890938c45045c1466a681fff535226ead" exitCode=0 Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.223872 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vmqxd" event={"ID":"bf9fc775-df38-4de1-b17a-d093a477938a","Type":"ContainerDied","Data":"f008e458bc1dca0a2781deea356d38d890938c45045c1466a681fff535226ead"} Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.228871 4838 generic.go:334] "Generic (PLEG): container finished" podID="e1f1e017-546a-4f0b-965e-bd050ad48e44" containerID="9130cd5998cc69dcf460ea6a726b4f0a2dbe0ba992f14901e0bec996820573e9" exitCode=0 Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.228910 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sdvg7" event={"ID":"e1f1e017-546a-4f0b-965e-bd050ad48e44","Type":"ContainerDied","Data":"9130cd5998cc69dcf460ea6a726b4f0a2dbe0ba992f14901e0bec996820573e9"} Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.239044 4838 generic.go:334] "Generic 
(PLEG): container finished" podID="d8b6f770-e994-4bf8-92de-7e359cbe75a8" containerID="5e6b8d3df090431f382ebb130cd462a38463f6742c3215993268d50c4fe52e50" exitCode=0 Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.239068 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n5fj2" event={"ID":"d8b6f770-e994-4bf8-92de-7e359cbe75a8","Type":"ContainerDied","Data":"5e6b8d3df090431f382ebb130cd462a38463f6742c3215993268d50c4fe52e50"} Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.341757 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.465252 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6gkhp" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.546477 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cwtjv\" (UniqueName: \"kubernetes.io/projected/0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0-kube-api-access-cwtjv\") pod \"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0\" (UID: \"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0\") " Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.546906 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0-utilities\") pod \"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0\" (UID: \"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0\") " Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.546933 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0-catalog-content\") pod \"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0\" (UID: \"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0\") " Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.548120 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0-utilities" (OuterVolumeSpecName: "utilities") pod "0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0" (UID: "0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.557246 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0-kube-api-access-cwtjv" (OuterVolumeSpecName: "kube-api-access-cwtjv") pod "0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0" (UID: "0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0"). InnerVolumeSpecName "kube-api-access-cwtjv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.614525 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0" (UID: "0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.655938 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.655978 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.655992 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cwtjv\" (UniqueName: \"kubernetes.io/projected/0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0-kube-api-access-cwtjv\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.661480 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sdvg7" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.681925 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vmqxd" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.691010 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mcgzz" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.757060 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rgsp8\" (UniqueName: \"kubernetes.io/projected/bf9fc775-df38-4de1-b17a-d093a477938a-kube-api-access-rgsp8\") pod \"bf9fc775-df38-4de1-b17a-d093a477938a\" (UID: \"bf9fc775-df38-4de1-b17a-d093a477938a\") " Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.757294 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1f1e017-546a-4f0b-965e-bd050ad48e44-catalog-content\") pod \"e1f1e017-546a-4f0b-965e-bd050ad48e44\" (UID: \"e1f1e017-546a-4f0b-965e-bd050ad48e44\") " Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.757377 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac5ded1f-10ca-4db6-b3a6-80f30f28cb34-utilities\") pod \"ac5ded1f-10ca-4db6-b3a6-80f30f28cb34\" (UID: \"ac5ded1f-10ca-4db6-b3a6-80f30f28cb34\") " Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.757479 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7mw6c\" (UniqueName: \"kubernetes.io/projected/ac5ded1f-10ca-4db6-b3a6-80f30f28cb34-kube-api-access-7mw6c\") pod \"ac5ded1f-10ca-4db6-b3a6-80f30f28cb34\" (UID: \"ac5ded1f-10ca-4db6-b3a6-80f30f28cb34\") " Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.757564 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac5ded1f-10ca-4db6-b3a6-80f30f28cb34-catalog-content\") pod \"ac5ded1f-10ca-4db6-b3a6-80f30f28cb34\" (UID: \"ac5ded1f-10ca-4db6-b3a6-80f30f28cb34\") " Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.757666 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1f1e017-546a-4f0b-965e-bd050ad48e44-utilities\") pod \"e1f1e017-546a-4f0b-965e-bd050ad48e44\" (UID: 
\"e1f1e017-546a-4f0b-965e-bd050ad48e44\") " Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.757762 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kxlsh\" (UniqueName: \"kubernetes.io/projected/e1f1e017-546a-4f0b-965e-bd050ad48e44-kube-api-access-kxlsh\") pod \"e1f1e017-546a-4f0b-965e-bd050ad48e44\" (UID: \"e1f1e017-546a-4f0b-965e-bd050ad48e44\") " Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.757853 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf9fc775-df38-4de1-b17a-d093a477938a-utilities\") pod \"bf9fc775-df38-4de1-b17a-d093a477938a\" (UID: \"bf9fc775-df38-4de1-b17a-d093a477938a\") " Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.757954 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf9fc775-df38-4de1-b17a-d093a477938a-catalog-content\") pod \"bf9fc775-df38-4de1-b17a-d093a477938a\" (UID: \"bf9fc775-df38-4de1-b17a-d093a477938a\") " Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.762403 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf9fc775-df38-4de1-b17a-d093a477938a-kube-api-access-rgsp8" (OuterVolumeSpecName: "kube-api-access-rgsp8") pod "bf9fc775-df38-4de1-b17a-d093a477938a" (UID: "bf9fc775-df38-4de1-b17a-d093a477938a"). InnerVolumeSpecName "kube-api-access-rgsp8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.771610 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac5ded1f-10ca-4db6-b3a6-80f30f28cb34-utilities" (OuterVolumeSpecName: "utilities") pod "ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" (UID: "ac5ded1f-10ca-4db6-b3a6-80f30f28cb34"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.774515 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e1f1e017-546a-4f0b-965e-bd050ad48e44-utilities" (OuterVolumeSpecName: "utilities") pod "e1f1e017-546a-4f0b-965e-bd050ad48e44" (UID: "e1f1e017-546a-4f0b-965e-bd050ad48e44"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.775034 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf9fc775-df38-4de1-b17a-d093a477938a-utilities" (OuterVolumeSpecName: "utilities") pod "bf9fc775-df38-4de1-b17a-d093a477938a" (UID: "bf9fc775-df38-4de1-b17a-d093a477938a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.779283 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac5ded1f-10ca-4db6-b3a6-80f30f28cb34-kube-api-access-7mw6c" (OuterVolumeSpecName: "kube-api-access-7mw6c") pod "ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" (UID: "ac5ded1f-10ca-4db6-b3a6-80f30f28cb34"). InnerVolumeSpecName "kube-api-access-7mw6c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.779526 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1f1e017-546a-4f0b-965e-bd050ad48e44-kube-api-access-kxlsh" (OuterVolumeSpecName: "kube-api-access-kxlsh") pod "e1f1e017-546a-4f0b-965e-bd050ad48e44" (UID: "e1f1e017-546a-4f0b-965e-bd050ad48e44"). InnerVolumeSpecName "kube-api-access-kxlsh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.822008 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf9fc775-df38-4de1-b17a-d093a477938a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bf9fc775-df38-4de1-b17a-d093a477938a" (UID: "bf9fc775-df38-4de1-b17a-d093a477938a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.836748 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e1f1e017-546a-4f0b-965e-bd050ad48e44-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e1f1e017-546a-4f0b-965e-bd050ad48e44" (UID: "e1f1e017-546a-4f0b-965e-bd050ad48e44"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.859797 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1f1e017-546a-4f0b-965e-bd050ad48e44-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.859833 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kxlsh\" (UniqueName: \"kubernetes.io/projected/e1f1e017-546a-4f0b-965e-bd050ad48e44-kube-api-access-kxlsh\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.859846 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf9fc775-df38-4de1-b17a-d093a477938a-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.859854 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf9fc775-df38-4de1-b17a-d093a477938a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.859863 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rgsp8\" (UniqueName: \"kubernetes.io/projected/bf9fc775-df38-4de1-b17a-d093a477938a-kube-api-access-rgsp8\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.859872 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1f1e017-546a-4f0b-965e-bd050ad48e44-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.859880 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac5ded1f-10ca-4db6-b3a6-80f30f28cb34-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.859889 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7mw6c\" (UniqueName: \"kubernetes.io/projected/ac5ded1f-10ca-4db6-b3a6-80f30f28cb34-kube-api-access-7mw6c\") on node \"crc\" 
DevicePath \"\"" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.871101 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-n5fj2" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.888048 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.932916 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac5ded1f-10ca-4db6-b3a6-80f30f28cb34-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" (UID: "ac5ded1f-10ca-4db6-b3a6-80f30f28cb34"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.960644 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/8a281de3-cc12-4dd1-b9be-0ca03a0613ec-marketplace-operator-metrics\") pod \"8a281de3-cc12-4dd1-b9be-0ca03a0613ec\" (UID: \"8a281de3-cc12-4dd1-b9be-0ca03a0613ec\") " Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.960696 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mbpr7\" (UniqueName: \"kubernetes.io/projected/8a281de3-cc12-4dd1-b9be-0ca03a0613ec-kube-api-access-mbpr7\") pod \"8a281de3-cc12-4dd1-b9be-0ca03a0613ec\" (UID: \"8a281de3-cc12-4dd1-b9be-0ca03a0613ec\") " Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.961088 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8a281de3-cc12-4dd1-b9be-0ca03a0613ec-marketplace-trusted-ca\") pod \"8a281de3-cc12-4dd1-b9be-0ca03a0613ec\" (UID: \"8a281de3-cc12-4dd1-b9be-0ca03a0613ec\") " Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.961169 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9dlv\" (UniqueName: \"kubernetes.io/projected/d8b6f770-e994-4bf8-92de-7e359cbe75a8-kube-api-access-t9dlv\") pod \"d8b6f770-e994-4bf8-92de-7e359cbe75a8\" (UID: \"d8b6f770-e994-4bf8-92de-7e359cbe75a8\") " Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.961220 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8b6f770-e994-4bf8-92de-7e359cbe75a8-catalog-content\") pod \"d8b6f770-e994-4bf8-92de-7e359cbe75a8\" (UID: \"d8b6f770-e994-4bf8-92de-7e359cbe75a8\") " Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.961305 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8b6f770-e994-4bf8-92de-7e359cbe75a8-utilities\") pod \"d8b6f770-e994-4bf8-92de-7e359cbe75a8\" (UID: \"d8b6f770-e994-4bf8-92de-7e359cbe75a8\") " Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.961434 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a281de3-cc12-4dd1-b9be-0ca03a0613ec-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "8a281de3-cc12-4dd1-b9be-0ca03a0613ec" (UID: "8a281de3-cc12-4dd1-b9be-0ca03a0613ec"). InnerVolumeSpecName "marketplace-trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.961591 4838 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8a281de3-cc12-4dd1-b9be-0ca03a0613ec-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.961603 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac5ded1f-10ca-4db6-b3a6-80f30f28cb34-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.962353 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8b6f770-e994-4bf8-92de-7e359cbe75a8-utilities" (OuterVolumeSpecName: "utilities") pod "d8b6f770-e994-4bf8-92de-7e359cbe75a8" (UID: "d8b6f770-e994-4bf8-92de-7e359cbe75a8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.963681 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a281de3-cc12-4dd1-b9be-0ca03a0613ec-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "8a281de3-cc12-4dd1-b9be-0ca03a0613ec" (UID: "8a281de3-cc12-4dd1-b9be-0ca03a0613ec"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.964467 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8b6f770-e994-4bf8-92de-7e359cbe75a8-kube-api-access-t9dlv" (OuterVolumeSpecName: "kube-api-access-t9dlv") pod "d8b6f770-e994-4bf8-92de-7e359cbe75a8" (UID: "d8b6f770-e994-4bf8-92de-7e359cbe75a8"). InnerVolumeSpecName "kube-api-access-t9dlv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:03:39 crc kubenswrapper[4838]: I1128 10:03:39.964946 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a281de3-cc12-4dd1-b9be-0ca03a0613ec-kube-api-access-mbpr7" (OuterVolumeSpecName: "kube-api-access-mbpr7") pod "8a281de3-cc12-4dd1-b9be-0ca03a0613ec" (UID: "8a281de3-cc12-4dd1-b9be-0ca03a0613ec"). InnerVolumeSpecName "kube-api-access-mbpr7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.008531 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-6mmw4"] Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.012532 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-kzkzk"] Nov 28 10:03:40 crc kubenswrapper[4838]: W1128 10:03:40.028504 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf55a8232_4e90_405d_846a_d68ac28dd002.slice/crio-865eaafa947af95be70bc15b62a0a9da7e1b8a6961fc27d9a2a2569fdc900dfc WatchSource:0}: Error finding container 865eaafa947af95be70bc15b62a0a9da7e1b8a6961fc27d9a2a2569fdc900dfc: Status 404 returned error can't find the container with id 865eaafa947af95be70bc15b62a0a9da7e1b8a6961fc27d9a2a2569fdc900dfc Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.063024 4838 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/8a281de3-cc12-4dd1-b9be-0ca03a0613ec-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.063057 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mbpr7\" (UniqueName: \"kubernetes.io/projected/8a281de3-cc12-4dd1-b9be-0ca03a0613ec-kube-api-access-mbpr7\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.063070 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9dlv\" (UniqueName: \"kubernetes.io/projected/d8b6f770-e994-4bf8-92de-7e359cbe75a8-kube-api-access-t9dlv\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.063083 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8b6f770-e994-4bf8-92de-7e359cbe75a8-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.091114 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8b6f770-e994-4bf8-92de-7e359cbe75a8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d8b6f770-e994-4bf8-92de-7e359cbe75a8" (UID: "d8b6f770-e994-4bf8-92de-7e359cbe75a8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.164757 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8b6f770-e994-4bf8-92de-7e359cbe75a8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.246036 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gkhp" event={"ID":"0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0","Type":"ContainerDied","Data":"4ca4591549ce38fb1e07a464118ff15d26bb366ff930a6274bfecb026e7e2bc5"} Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.246087 4838 scope.go:117] "RemoveContainer" containerID="c7f193b4ec4568793495fffecdd0e3503720c269ac15d45d3716d5b6853b6d50" Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.246132 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6gkhp" Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.248772 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mcgzz" Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.248768 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mcgzz" event={"ID":"ac5ded1f-10ca-4db6-b3a6-80f30f28cb34","Type":"ContainerDied","Data":"4816a76d064a60017fefb4b0c4485525b3504b57e3381c3881b2b23c5308ff60"} Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.250202 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl" event={"ID":"8a281de3-cc12-4dd1-b9be-0ca03a0613ec","Type":"ContainerDied","Data":"e751d1b872dd99bf093972c4be19f6624fdafa3eddd0cc7b4bb4b247dcad03fe"} Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.250309 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-xsjrl" Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.252754 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vmqxd" event={"ID":"bf9fc775-df38-4de1-b17a-d093a477938a","Type":"ContainerDied","Data":"5a4a520c7f7c3e0bcb780656a962e2d4d97466f3b309ebc7b9dbd9cf91ce80f9"} Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.252845 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vmqxd" Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.268221 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-n5fj2" event={"ID":"d8b6f770-e994-4bf8-92de-7e359cbe75a8","Type":"ContainerDied","Data":"78417b97ae79fca8e27906fe79e31a84bfc4b0e15126842c884c7514e94c87fc"} Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.268327 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-n5fj2" Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.276294 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sdvg7" event={"ID":"e1f1e017-546a-4f0b-965e-bd050ad48e44","Type":"ContainerDied","Data":"2d27f2b6be67f789c4d44de8532764dd99dbebc82d3e4a72a69a947a62caffad"} Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.276311 4838 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.279032 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" event={"ID":"f55a8232-4e90-405d-846a-d68ac28dd002","Type":"ContainerStarted","Data":"307c6eab7f89381c511b3fa6342ed45e224ea843e46d0270ec1bdac69d598cf6"}
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.279074 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" event={"ID":"f55a8232-4e90-405d-846a-d68ac28dd002","Type":"ContainerStarted","Data":"865eaafa947af95be70bc15b62a0a9da7e1b8a6961fc27d9a2a2569fdc900dfc"}
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.279365 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.283382 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-6mmw4" event={"ID":"b0c9680e-7b0a-47a9-87dc-4da8cfbfce77","Type":"ContainerStarted","Data":"011938b5e1171fc182513c99f5398b0fbaea2a945227a96bd6107c4e31a93e0e"}
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.283430 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-6mmw4" event={"ID":"b0c9680e-7b0a-47a9-87dc-4da8cfbfce77","Type":"ContainerStarted","Data":"1364edb19cdc2e43b6037ce4efd0d9b185bcbfabf0a0678c8040778e59441d02"}
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.289681 4838 scope.go:117] "RemoveContainer" containerID="0c8cdc81d32988e542c95a81369f654dcc3d55dce34a8d6da5f27324fd237065"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.303889 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" podStartSLOduration=2.303868478 podStartE2EDuration="2.303868478s" podCreationTimestamp="2025-11-28 10:03:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:03:40.301094743 +0000 UTC m=+392.000068933" watchObservedRunningTime="2025-11-28 10:03:40.303868478 +0000 UTC m=+392.002842648"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.313903 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vmqxd"]
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.316846 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vmqxd"]
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.324164 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-xsjrl"]
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.328684 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-xsjrl"]
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.329335 4838 scope.go:117] "RemoveContainer" containerID="dfdb37a7ca77c79d0676f1cad327232e1a104e83ada4ee6a9ebcb5e583376643"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.335351 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-6mmw4" podStartSLOduration=2.335331663 podStartE2EDuration="2.335331663s" podCreationTimestamp="2025-11-28 10:03:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:03:40.335138208 +0000 UTC m=+392.034112398" watchObservedRunningTime="2025-11-28 10:03:40.335331663 +0000 UTC m=+392.034305853"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.353652 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-n5fj2"]
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.358869 4838 scope.go:117] "RemoveContainer" containerID="0c552617179b3cea76f22cfdaf8d852af57ced4aabea154b74088bf2fe6615b0"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.362485 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-n5fj2"]
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.380124 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mcgzz"]
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.390010 4838 scope.go:117] "RemoveContainer" containerID="03469c72ac0991ae157861743220c2f5dc230717fd21348bba20c6d870f9c815"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.394986 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mcgzz"]
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.409709 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6gkhp"]
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.413899 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6gkhp"]
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.418919 4838 scope.go:117] "RemoveContainer" containerID="1b656d0ecbb8cacf14c3701393397027a6e79c82dee8cbab0309b1ed5c34654f"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.418968 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sdvg7"]
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.422460 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-sdvg7"]
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.436628 4838 scope.go:117] "RemoveContainer" containerID="6e26a372f47f409cbd328fe79d285e7b58ca7123f6fff591bcefa8d90affd22a"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.455513 4838 scope.go:117] "RemoveContainer" containerID="f008e458bc1dca0a2781deea356d38d890938c45045c1466a681fff535226ead"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.472568 4838 scope.go:117] "RemoveContainer" containerID="ac2cf7eb990edffbc429d7b339379188834ad1467899ad9966432ce3b01c9471"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.487939 4838 scope.go:117] "RemoveContainer" containerID="ebc74bd129c2ec0bc04d0f54fe512529c50b565a4b308f18d62e36aa273af4dc"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.505964 4838 scope.go:117] "RemoveContainer" containerID="5e6b8d3df090431f382ebb130cd462a38463f6742c3215993268d50c4fe52e50"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.518140 4838 scope.go:117] "RemoveContainer" containerID="2a444bedf0b76e2433f75ae44b5203b000e460cdc1fbd076fe1009a86151e3a4"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.539326 4838 scope.go:117] "RemoveContainer" containerID="fa9315aadc1944769c86803802d324f970426c4b51cf3cb3407b157a61bb94ca"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.560045 4838 scope.go:117] "RemoveContainer" containerID="9130cd5998cc69dcf460ea6a726b4f0a2dbe0ba992f14901e0bec996820573e9"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.571961 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0" path="/var/lib/kubelet/pods/0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0/volumes"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.572572 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a281de3-cc12-4dd1-b9be-0ca03a0613ec" path="/var/lib/kubelet/pods/8a281de3-cc12-4dd1-b9be-0ca03a0613ec/volumes"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.573038 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" path="/var/lib/kubelet/pods/ac5ded1f-10ca-4db6-b3a6-80f30f28cb34/volumes"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.574100 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf9fc775-df38-4de1-b17a-d093a477938a" path="/var/lib/kubelet/pods/bf9fc775-df38-4de1-b17a-d093a477938a/volumes"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.575007 4838 scope.go:117] "RemoveContainer" containerID="9aef0bb53412b66d5aaff4ca64cd815054b8c645bca80ad62eb81ee6052c9736"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.576127 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8b6f770-e994-4bf8-92de-7e359cbe75a8" path="/var/lib/kubelet/pods/d8b6f770-e994-4bf8-92de-7e359cbe75a8/volumes"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.576705 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1f1e017-546a-4f0b-965e-bd050ad48e44" path="/var/lib/kubelet/pods/e1f1e017-546a-4f0b-965e-bd050ad48e44/volumes"
Nov 28 10:03:40 crc kubenswrapper[4838]: I1128 10:03:40.589972 4838 scope.go:117] "RemoveContainer" containerID="d775e52040ce98e7e6734093f7423db819e3bec331e24658ecf5056e3df9bb7f"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.302438 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-6mmw4"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.306980 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-6mmw4"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.390266 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-trs27"]
Nov 28 10:03:41 crc kubenswrapper[4838]: E1128 10:03:41.390544 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1f1e017-546a-4f0b-965e-bd050ad48e44" containerName="extract-utilities"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.390559 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1f1e017-546a-4f0b-965e-bd050ad48e44" containerName="extract-utilities"
Nov 28 10:03:41 crc kubenswrapper[4838]: E1128 10:03:41.390572 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0" containerName="extract-utilities"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.390581 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0" containerName="extract-utilities"
Nov 28 10:03:41 crc kubenswrapper[4838]: E1128 10:03:41.390595 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf9fc775-df38-4de1-b17a-d093a477938a" containerName="extract-utilities"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.390603 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf9fc775-df38-4de1-b17a-d093a477938a" containerName="extract-utilities"
Nov 28 10:03:41 crc kubenswrapper[4838]: E1128 10:03:41.390613 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0" containerName="extract-content"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.390620 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0" containerName="extract-content"
Nov 28 10:03:41 crc kubenswrapper[4838]: E1128 10:03:41.390631 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8b6f770-e994-4bf8-92de-7e359cbe75a8" containerName="extract-content"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.390638 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8b6f770-e994-4bf8-92de-7e359cbe75a8" containerName="extract-content"
Nov 28 10:03:41 crc kubenswrapper[4838]: E1128 10:03:41.390653 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf9fc775-df38-4de1-b17a-d093a477938a" containerName="registry-server"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.390662 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf9fc775-df38-4de1-b17a-d093a477938a" containerName="registry-server"
Nov 28 10:03:41 crc kubenswrapper[4838]: E1128 10:03:41.390673 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1f1e017-546a-4f0b-965e-bd050ad48e44" containerName="registry-server"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.390682 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1f1e017-546a-4f0b-965e-bd050ad48e44" containerName="registry-server"
Nov 28 10:03:41 crc kubenswrapper[4838]: E1128 10:03:41.390691 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" containerName="extract-utilities"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.390698 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" containerName="extract-utilities"
Nov 28 10:03:41 crc kubenswrapper[4838]: E1128 10:03:41.390707 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf9fc775-df38-4de1-b17a-d093a477938a" containerName="extract-content"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.390764 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf9fc775-df38-4de1-b17a-d093a477938a" containerName="extract-content"
Nov 28 10:03:41 crc kubenswrapper[4838]: E1128 10:03:41.390779 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" containerName="registry-server"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.390786 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" containerName="registry-server"
Nov 28 10:03:41 crc kubenswrapper[4838]: E1128 10:03:41.390797 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a281de3-cc12-4dd1-b9be-0ca03a0613ec" containerName="marketplace-operator"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.390804 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a281de3-cc12-4dd1-b9be-0ca03a0613ec" containerName="marketplace-operator"
Nov 28 10:03:41 crc kubenswrapper[4838]: E1128 10:03:41.390814 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" containerName="extract-content"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.390823 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" containerName="extract-content"
Nov 28 10:03:41 crc kubenswrapper[4838]: E1128 10:03:41.390835 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0" containerName="registry-server"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.390843 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0" containerName="registry-server"
Nov 28 10:03:41 crc kubenswrapper[4838]: E1128 10:03:41.390853 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1f1e017-546a-4f0b-965e-bd050ad48e44" containerName="extract-content"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.390862 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1f1e017-546a-4f0b-965e-bd050ad48e44" containerName="extract-content"
Nov 28 10:03:41 crc kubenswrapper[4838]: E1128 10:03:41.390872 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8b6f770-e994-4bf8-92de-7e359cbe75a8" containerName="extract-utilities"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.390879 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8b6f770-e994-4bf8-92de-7e359cbe75a8" containerName="extract-utilities"
Nov 28 10:03:41 crc kubenswrapper[4838]: E1128 10:03:41.390889 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8b6f770-e994-4bf8-92de-7e359cbe75a8" containerName="registry-server"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.390896 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8b6f770-e994-4bf8-92de-7e359cbe75a8" containerName="registry-server"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.391019 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac5ded1f-10ca-4db6-b3a6-80f30f28cb34" containerName="registry-server"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.391033 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf9fc775-df38-4de1-b17a-d093a477938a" containerName="registry-server"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.391047 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8b6f770-e994-4bf8-92de-7e359cbe75a8" containerName="registry-server"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.391056 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a0eea75-7b92-4cde-bb7d-5ee4cb46ddd0" containerName="registry-server"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.391065 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1f1e017-546a-4f0b-965e-bd050ad48e44" containerName="registry-server"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.391074 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a281de3-cc12-4dd1-b9be-0ca03a0613ec" containerName="marketplace-operator"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.392062 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-trs27"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.397665 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-trs27"]
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.400440 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.587918 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8-utilities\") pod \"redhat-marketplace-trs27\" (UID: \"1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8\") " pod="openshift-marketplace/redhat-marketplace-trs27"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.588032 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45zq5\" (UniqueName: \"kubernetes.io/projected/1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8-kube-api-access-45zq5\") pod \"redhat-marketplace-trs27\" (UID: \"1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8\") " pod="openshift-marketplace/redhat-marketplace-trs27"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.588088 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8-catalog-content\") pod \"redhat-marketplace-trs27\" (UID: \"1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8\") " pod="openshift-marketplace/redhat-marketplace-trs27"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.689235 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8-utilities\") pod \"redhat-marketplace-trs27\" (UID: \"1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8\") " pod="openshift-marketplace/redhat-marketplace-trs27"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.689387 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45zq5\" (UniqueName: \"kubernetes.io/projected/1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8-kube-api-access-45zq5\") pod \"redhat-marketplace-trs27\" (UID: \"1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8\") " pod="openshift-marketplace/redhat-marketplace-trs27"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.689494 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8-catalog-content\") pod \"redhat-marketplace-trs27\" (UID: \"1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8\") " pod="openshift-marketplace/redhat-marketplace-trs27"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.691069 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8-utilities\") pod \"redhat-marketplace-trs27\" (UID: \"1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8\") " pod="openshift-marketplace/redhat-marketplace-trs27"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.693250 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8-catalog-content\") pod \"redhat-marketplace-trs27\" (UID: \"1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8\") " pod="openshift-marketplace/redhat-marketplace-trs27"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.716729 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45zq5\" (UniqueName: \"kubernetes.io/projected/1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8-kube-api-access-45zq5\") pod \"redhat-marketplace-trs27\" (UID: \"1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8\") " pod="openshift-marketplace/redhat-marketplace-trs27"
Nov 28 10:03:41 crc kubenswrapper[4838]: I1128 10:03:41.717705 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-trs27"
Nov 28 10:03:42 crc kubenswrapper[4838]: I1128 10:03:42.137077 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-trs27"]
Nov 28 10:03:42 crc kubenswrapper[4838]: W1128 10:03:42.149604 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1b0a5899_5f8c_42e9_b686_cbc3ae3b33c8.slice/crio-bda316ff81bc115f36da6077df521fc983bf76802b08a34fe8fe5aa0de453822 WatchSource:0}: Error finding container bda316ff81bc115f36da6077df521fc983bf76802b08a34fe8fe5aa0de453822: Status 404 returned error can't find the container with id bda316ff81bc115f36da6077df521fc983bf76802b08a34fe8fe5aa0de453822
Nov 28 10:03:42 crc kubenswrapper[4838]: I1128 10:03:42.310697 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-trs27" event={"ID":"1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8","Type":"ContainerStarted","Data":"16750e82fe7381d27d7b57357b9eb9a0da4dd25e89a6d6401edd9565c46dd50c"}
Nov 28 10:03:42 crc kubenswrapper[4838]: I1128 10:03:42.311784 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-trs27" event={"ID":"1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8","Type":"ContainerStarted","Data":"bda316ff81bc115f36da6077df521fc983bf76802b08a34fe8fe5aa0de453822"}
Nov 28 10:03:42 crc kubenswrapper[4838]: I1128 10:03:42.374178 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-cmcj8"]
Nov 28 10:03:42 crc kubenswrapper[4838]: I1128 10:03:42.376757 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cmcj8"
Nov 28 10:03:42 crc kubenswrapper[4838]: I1128 10:03:42.382034 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 28 10:03:42 crc kubenswrapper[4838]: I1128 10:03:42.398092 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cmcj8"]
Nov 28 10:03:42 crc kubenswrapper[4838]: I1128 10:03:42.505543 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwwp4\" (UniqueName: \"kubernetes.io/projected/01087280-c77b-4764-91ba-468b21f32427-kube-api-access-dwwp4\") pod \"redhat-operators-cmcj8\" (UID: \"01087280-c77b-4764-91ba-468b21f32427\") " pod="openshift-marketplace/redhat-operators-cmcj8"
Nov 28 10:03:42 crc kubenswrapper[4838]: I1128 10:03:42.505642 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01087280-c77b-4764-91ba-468b21f32427-catalog-content\") pod \"redhat-operators-cmcj8\" (UID: \"01087280-c77b-4764-91ba-468b21f32427\") " pod="openshift-marketplace/redhat-operators-cmcj8"
Nov 28 10:03:42 crc kubenswrapper[4838]: I1128 10:03:42.505676 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01087280-c77b-4764-91ba-468b21f32427-utilities\") pod \"redhat-operators-cmcj8\" (UID: \"01087280-c77b-4764-91ba-468b21f32427\") " pod="openshift-marketplace/redhat-operators-cmcj8"
Nov 28 10:03:42 crc kubenswrapper[4838]: I1128 10:03:42.606547 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwwp4\" (UniqueName: \"kubernetes.io/projected/01087280-c77b-4764-91ba-468b21f32427-kube-api-access-dwwp4\") pod \"redhat-operators-cmcj8\" (UID: \"01087280-c77b-4764-91ba-468b21f32427\") " pod="openshift-marketplace/redhat-operators-cmcj8"
Nov 28 10:03:42 crc kubenswrapper[4838]: I1128 10:03:42.607077 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01087280-c77b-4764-91ba-468b21f32427-catalog-content\") pod \"redhat-operators-cmcj8\" (UID: \"01087280-c77b-4764-91ba-468b21f32427\") " pod="openshift-marketplace/redhat-operators-cmcj8"
Nov 28 10:03:42 crc kubenswrapper[4838]: I1128 10:03:42.607149 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01087280-c77b-4764-91ba-468b21f32427-utilities\") pod \"redhat-operators-cmcj8\" (UID: \"01087280-c77b-4764-91ba-468b21f32427\") " pod="openshift-marketplace/redhat-operators-cmcj8"
Nov 28 10:03:42 crc kubenswrapper[4838]: I1128 10:03:42.607430 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01087280-c77b-4764-91ba-468b21f32427-catalog-content\") pod \"redhat-operators-cmcj8\" (UID: \"01087280-c77b-4764-91ba-468b21f32427\") " pod="openshift-marketplace/redhat-operators-cmcj8"
Nov 28 10:03:42 crc kubenswrapper[4838]: I1128 10:03:42.608588 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01087280-c77b-4764-91ba-468b21f32427-utilities\") pod \"redhat-operators-cmcj8\" (UID: \"01087280-c77b-4764-91ba-468b21f32427\") " pod="openshift-marketplace/redhat-operators-cmcj8"
Nov 28 10:03:42 crc kubenswrapper[4838]: I1128 10:03:42.623638 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwwp4\" (UniqueName: \"kubernetes.io/projected/01087280-c77b-4764-91ba-468b21f32427-kube-api-access-dwwp4\") pod \"redhat-operators-cmcj8\" (UID: \"01087280-c77b-4764-91ba-468b21f32427\") " pod="openshift-marketplace/redhat-operators-cmcj8"
Nov 28 10:03:42 crc kubenswrapper[4838]: I1128 10:03:42.700185 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cmcj8"
Nov 28 10:03:43 crc kubenswrapper[4838]: I1128 10:03:43.135524 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cmcj8"]
Nov 28 10:03:43 crc kubenswrapper[4838]: W1128 10:03:43.150005 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod01087280_c77b_4764_91ba_468b21f32427.slice/crio-140554b1257c61c46fe21598f21cb05962ad582a8a3193fca75818e737641f77 WatchSource:0}: Error finding container 140554b1257c61c46fe21598f21cb05962ad582a8a3193fca75818e737641f77: Status 404 returned error can't find the container with id 140554b1257c61c46fe21598f21cb05962ad582a8a3193fca75818e737641f77
Nov 28 10:03:43 crc kubenswrapper[4838]: I1128 10:03:43.316985 4838 generic.go:334] "Generic (PLEG): container finished" podID="1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8" containerID="16750e82fe7381d27d7b57357b9eb9a0da4dd25e89a6d6401edd9565c46dd50c" exitCode=0
Nov 28 10:03:43 crc kubenswrapper[4838]: I1128 10:03:43.317085 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-trs27" event={"ID":"1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8","Type":"ContainerDied","Data":"16750e82fe7381d27d7b57357b9eb9a0da4dd25e89a6d6401edd9565c46dd50c"}
Nov 28 10:03:43 crc kubenswrapper[4838]: I1128 10:03:43.320018 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cmcj8" event={"ID":"01087280-c77b-4764-91ba-468b21f32427","Type":"ContainerStarted","Data":"6174678dbe67516ef52f0b88269052f6782a8fd318187251060955b14cfdf714"}
Nov 28 10:03:43 crc kubenswrapper[4838]: I1128 10:03:43.320073 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cmcj8" event={"ID":"01087280-c77b-4764-91ba-468b21f32427","Type":"ContainerStarted","Data":"140554b1257c61c46fe21598f21cb05962ad582a8a3193fca75818e737641f77"}
Nov 28 10:03:43 crc kubenswrapper[4838]: I1128 10:03:43.783480 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-n4cg9"]
Nov 28 10:03:43 crc kubenswrapper[4838]: I1128 10:03:43.785097 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n4cg9"
Nov 28 10:03:43 crc kubenswrapper[4838]: I1128 10:03:43.786856 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n4cg9"]
Nov 28 10:03:43 crc kubenswrapper[4838]: I1128 10:03:43.787946 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 28 10:03:43 crc kubenswrapper[4838]: I1128 10:03:43.925358 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9afd2484-54f9-4dd9-b081-6537c075864f-catalog-content\") pod \"community-operators-n4cg9\" (UID: \"9afd2484-54f9-4dd9-b081-6537c075864f\") " pod="openshift-marketplace/community-operators-n4cg9"
Nov 28 10:03:43 crc kubenswrapper[4838]: I1128 10:03:43.925515 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9afd2484-54f9-4dd9-b081-6537c075864f-utilities\") pod \"community-operators-n4cg9\" (UID: \"9afd2484-54f9-4dd9-b081-6537c075864f\") " pod="openshift-marketplace/community-operators-n4cg9"
Nov 28 10:03:43 crc kubenswrapper[4838]: I1128 10:03:43.925599 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cpj8h\" (UniqueName: \"kubernetes.io/projected/9afd2484-54f9-4dd9-b081-6537c075864f-kube-api-access-cpj8h\") pod \"community-operators-n4cg9\" (UID: \"9afd2484-54f9-4dd9-b081-6537c075864f\") " pod="openshift-marketplace/community-operators-n4cg9"
Nov 28 10:03:44 crc kubenswrapper[4838]: I1128 10:03:44.026631 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9afd2484-54f9-4dd9-b081-6537c075864f-utilities\") pod \"community-operators-n4cg9\" (UID: \"9afd2484-54f9-4dd9-b081-6537c075864f\") " pod="openshift-marketplace/community-operators-n4cg9"
Nov 28 10:03:44 crc kubenswrapper[4838]: I1128 10:03:44.026705 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cpj8h\" (UniqueName: \"kubernetes.io/projected/9afd2484-54f9-4dd9-b081-6537c075864f-kube-api-access-cpj8h\") pod \"community-operators-n4cg9\" (UID: \"9afd2484-54f9-4dd9-b081-6537c075864f\") " pod="openshift-marketplace/community-operators-n4cg9"
Nov 28 10:03:44 crc kubenswrapper[4838]: I1128 10:03:44.026778 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9afd2484-54f9-4dd9-b081-6537c075864f-catalog-content\") pod \"community-operators-n4cg9\" (UID: \"9afd2484-54f9-4dd9-b081-6537c075864f\") " pod="openshift-marketplace/community-operators-n4cg9"
Nov 28 10:03:44 crc kubenswrapper[4838]: I1128 10:03:44.027361 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9afd2484-54f9-4dd9-b081-6537c075864f-catalog-content\") pod \"community-operators-n4cg9\" (UID: \"9afd2484-54f9-4dd9-b081-6537c075864f\") " pod="openshift-marketplace/community-operators-n4cg9"
Nov 28 10:03:44 crc kubenswrapper[4838]: I1128 10:03:44.027483 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9afd2484-54f9-4dd9-b081-6537c075864f-utilities\") pod \"community-operators-n4cg9\" (UID: \"9afd2484-54f9-4dd9-b081-6537c075864f\") " pod="openshift-marketplace/community-operators-n4cg9"
Nov 28 10:03:44 crc kubenswrapper[4838]: I1128 10:03:44.055791 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cpj8h\" (UniqueName: \"kubernetes.io/projected/9afd2484-54f9-4dd9-b081-6537c075864f-kube-api-access-cpj8h\") pod \"community-operators-n4cg9\" (UID: \"9afd2484-54f9-4dd9-b081-6537c075864f\") " pod="openshift-marketplace/community-operators-n4cg9"
Nov 28 10:03:44 crc kubenswrapper[4838]: I1128 10:03:44.111835 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n4cg9"
Nov 28 10:03:44 crc kubenswrapper[4838]: I1128 10:03:44.326239 4838 generic.go:334] "Generic (PLEG): container finished" podID="1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8" containerID="676c0af6e9b320f459fc783379783a609444836c69516e97383f9bb62bbb929e" exitCode=0
Nov 28 10:03:44 crc kubenswrapper[4838]: I1128 10:03:44.326292 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-trs27" event={"ID":"1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8","Type":"ContainerDied","Data":"676c0af6e9b320f459fc783379783a609444836c69516e97383f9bb62bbb929e"}
Nov 28 10:03:44 crc kubenswrapper[4838]: I1128 10:03:44.327845 4838 generic.go:334] "Generic (PLEG): container finished" podID="01087280-c77b-4764-91ba-468b21f32427" containerID="6174678dbe67516ef52f0b88269052f6782a8fd318187251060955b14cfdf714" exitCode=0
Nov 28 10:03:44 crc kubenswrapper[4838]: I1128 10:03:44.327977 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cmcj8" event={"ID":"01087280-c77b-4764-91ba-468b21f32427","Type":"ContainerDied","Data":"6174678dbe67516ef52f0b88269052f6782a8fd318187251060955b14cfdf714"}
Nov 28 10:03:44 crc kubenswrapper[4838]: I1128 10:03:44.477757 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n4cg9"]
Nov 28 10:03:44 crc kubenswrapper[4838]: W1128 10:03:44.483819 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9afd2484_54f9_4dd9_b081_6537c075864f.slice/crio-151fb818f7035fc015c5fdc1c27d834c54b267c29634ce221a22f6df3c978b06 WatchSource:0}: Error finding container 151fb818f7035fc015c5fdc1c27d834c54b267c29634ce221a22f6df3c978b06: Status 404 returned error can't find the container with id 151fb818f7035fc015c5fdc1c27d834c54b267c29634ce221a22f6df3c978b06
Nov 28 10:03:44 crc kubenswrapper[4838]: I1128 10:03:44.769940 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-j64zj"]
Nov 28 10:03:44 crc kubenswrapper[4838]: I1128 10:03:44.771385 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j64zj"
Nov 28 10:03:44 crc kubenswrapper[4838]: I1128 10:03:44.775182 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 28 10:03:44 crc kubenswrapper[4838]: I1128 10:03:44.799580 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j64zj"]
Nov 28 10:03:44 crc kubenswrapper[4838]: I1128 10:03:44.939003 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c38d1237-07ae-448a-9a53-5432a944fd83-utilities\") pod \"certified-operators-j64zj\" (UID: \"c38d1237-07ae-448a-9a53-5432a944fd83\") " pod="openshift-marketplace/certified-operators-j64zj"
Nov 28 10:03:44 crc kubenswrapper[4838]: I1128 10:03:44.939366 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c38d1237-07ae-448a-9a53-5432a944fd83-catalog-content\") pod \"certified-operators-j64zj\" (UID: \"c38d1237-07ae-448a-9a53-5432a944fd83\") " pod="openshift-marketplace/certified-operators-j64zj"
Nov 28 10:03:44 crc kubenswrapper[4838]: I1128 10:03:44.939429 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2rmp\" (UniqueName: \"kubernetes.io/projected/c38d1237-07ae-448a-9a53-5432a944fd83-kube-api-access-j2rmp\") pod \"certified-operators-j64zj\" (UID: \"c38d1237-07ae-448a-9a53-5432a944fd83\") " pod="openshift-marketplace/certified-operators-j64zj"
Nov 28 10:03:45 crc kubenswrapper[4838]: I1128 10:03:45.041354 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2rmp\" (UniqueName: \"kubernetes.io/projected/c38d1237-07ae-448a-9a53-5432a944fd83-kube-api-access-j2rmp\") pod \"certified-operators-j64zj\" (UID: \"c38d1237-07ae-448a-9a53-5432a944fd83\") " pod="openshift-marketplace/certified-operators-j64zj"
Nov 28 10:03:45 crc kubenswrapper[4838]: I1128 10:03:45.041457 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c38d1237-07ae-448a-9a53-5432a944fd83-utilities\") pod \"certified-operators-j64zj\" (UID: \"c38d1237-07ae-448a-9a53-5432a944fd83\") " pod="openshift-marketplace/certified-operators-j64zj"
Nov 28 10:03:45 crc kubenswrapper[4838]: I1128 10:03:45.041492 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c38d1237-07ae-448a-9a53-5432a944fd83-catalog-content\") pod \"certified-operators-j64zj\" (UID: \"c38d1237-07ae-448a-9a53-5432a944fd83\") " pod="openshift-marketplace/certified-operators-j64zj"
Nov 28 10:03:45 crc kubenswrapper[4838]: I1128 10:03:45.041972 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c38d1237-07ae-448a-9a53-5432a944fd83-utilities\") pod \"certified-operators-j64zj\" (UID: \"c38d1237-07ae-448a-9a53-5432a944fd83\") " pod="openshift-marketplace/certified-operators-j64zj"
Nov 28 10:03:45 crc kubenswrapper[4838]: I1128 10:03:45.042044 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c38d1237-07ae-448a-9a53-5432a944fd83-catalog-content\") pod \"certified-operators-j64zj\" (UID: \"c38d1237-07ae-448a-9a53-5432a944fd83\") " pod="openshift-marketplace/certified-operators-j64zj"
Nov 28 10:03:45 crc kubenswrapper[4838]: I1128 10:03:45.069115 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2rmp\" (UniqueName: \"kubernetes.io/projected/c38d1237-07ae-448a-9a53-5432a944fd83-kube-api-access-j2rmp\") pod \"certified-operators-j64zj\" (UID: \"c38d1237-07ae-448a-9a53-5432a944fd83\") " pod="openshift-marketplace/certified-operators-j64zj"
Nov 28 10:03:45 crc kubenswrapper[4838]: I1128 10:03:45.109991 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j64zj"
Nov 28 10:03:45 crc kubenswrapper[4838]: I1128 10:03:45.344617 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-trs27" event={"ID":"1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8","Type":"ContainerStarted","Data":"efa9d965ecd8117fda0e2d5fbb8e8a25181775005938dfb43e875d7ef670fee8"}
Nov 28 10:03:45 crc kubenswrapper[4838]: I1128 10:03:45.346572 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cmcj8" event={"ID":"01087280-c77b-4764-91ba-468b21f32427","Type":"ContainerStarted","Data":"023c8adce6d50fe9a6c5b8ef8bf12d84794c113607dfbab5c8ea15a4c7ac69a6"}
Nov 28 10:03:45 crc kubenswrapper[4838]: I1128 10:03:45.349257 4838 generic.go:334] "Generic (PLEG): container finished" podID="9afd2484-54f9-4dd9-b081-6537c075864f" containerID="da6e0ca5d682c9a6377ca47e521c5624f8608c796e7a19b9811d5cafd08cbadc" exitCode=0
Nov 28 10:03:45 crc kubenswrapper[4838]: I1128 10:03:45.349287 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n4cg9" event={"ID":"9afd2484-54f9-4dd9-b081-6537c075864f","Type":"ContainerDied","Data":"da6e0ca5d682c9a6377ca47e521c5624f8608c796e7a19b9811d5cafd08cbadc"}
Nov 28 10:03:45 crc kubenswrapper[4838]: I1128 10:03:45.349304 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n4cg9" event={"ID":"9afd2484-54f9-4dd9-b081-6537c075864f","Type":"ContainerStarted","Data":"151fb818f7035fc015c5fdc1c27d834c54b267c29634ce221a22f6df3c978b06"}
Nov 28 10:03:45 crc kubenswrapper[4838]: I1128 10:03:45.363041 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-trs27" podStartSLOduration=1.87562542 podStartE2EDuration="4.363023756s" podCreationTimestamp="2025-11-28 10:03:41 +0000 UTC" firstStartedPulling="2025-11-28 10:03:42.312347225 +0000 UTC m=+394.011321405" lastFinishedPulling="2025-11-28 10:03:44.799745561 +0000 UTC m=+396.498719741" observedRunningTime="2025-11-28 10:03:45.361031262 +0000 UTC m=+397.060005432" watchObservedRunningTime="2025-11-28 10:03:45.363023756 +0000 UTC m=+397.061997926"
Nov 28 10:03:45 crc kubenswrapper[4838]: I1128 10:03:45.533562 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j64zj"]
Nov 28 10:03:45 crc kubenswrapper[4838]: W1128 10:03:45.539151 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc38d1237_07ae_448a_9a53_5432a944fd83.slice/crio-c75fdb4bbdee81cf9c804ebfd8cb1da7a035b08d93c953ce70b8d814665b49cf WatchSource:0}: Error finding container c75fdb4bbdee81cf9c804ebfd8cb1da7a035b08d93c953ce70b8d814665b49cf: Status 404 returned error can't find the container with id c75fdb4bbdee81cf9c804ebfd8cb1da7a035b08d93c953ce70b8d814665b49cf
Nov 28 10:03:46 crc kubenswrapper[4838]: I1128 10:03:46.359302 4838 generic.go:334] "Generic (PLEG): container finished" podID="9afd2484-54f9-4dd9-b081-6537c075864f" containerID="56d207174bf5fbea38d8b4add286c95d7ec3df0362e0c464634a87fb3758973c" exitCode=0
Nov 28 10:03:46 crc kubenswrapper[4838]: I1128 10:03:46.359534 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n4cg9" event={"ID":"9afd2484-54f9-4dd9-b081-6537c075864f","Type":"ContainerDied","Data":"56d207174bf5fbea38d8b4add286c95d7ec3df0362e0c464634a87fb3758973c"}
Nov 28 10:03:46 crc kubenswrapper[4838]: I1128 10:03:46.361213 4838 generic.go:334] "Generic (PLEG): container finished" podID="01087280-c77b-4764-91ba-468b21f32427" containerID="023c8adce6d50fe9a6c5b8ef8bf12d84794c113607dfbab5c8ea15a4c7ac69a6" exitCode=0
Nov 28 10:03:46 crc kubenswrapper[4838]: I1128 10:03:46.361261 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cmcj8" event={"ID":"01087280-c77b-4764-91ba-468b21f32427","Type":"ContainerDied","Data":"023c8adce6d50fe9a6c5b8ef8bf12d84794c113607dfbab5c8ea15a4c7ac69a6"}
Nov 28 10:03:46 crc kubenswrapper[4838]: I1128 10:03:46.363586 4838 generic.go:334] "Generic (PLEG): container finished" podID="c38d1237-07ae-448a-9a53-5432a944fd83" containerID="303eec7057551e83fea7d27fe6f5ed2886483dc1559c5075bab9fa63380ef4ae" exitCode=0
Nov 28 10:03:46 crc kubenswrapper[4838]: I1128 10:03:46.363658 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j64zj" event={"ID":"c38d1237-07ae-448a-9a53-5432a944fd83","Type":"ContainerDied","Data":"303eec7057551e83fea7d27fe6f5ed2886483dc1559c5075bab9fa63380ef4ae"}
Nov 28 10:03:46 crc kubenswrapper[4838]: I1128 10:03:46.363706 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j64zj" event={"ID":"c38d1237-07ae-448a-9a53-5432a944fd83","Type":"ContainerStarted","Data":"c75fdb4bbdee81cf9c804ebfd8cb1da7a035b08d93c953ce70b8d814665b49cf"}
Nov 28 10:03:47 crc kubenswrapper[4838]: I1128 10:03:47.372969 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cmcj8" event={"ID":"01087280-c77b-4764-91ba-468b21f32427","Type":"ContainerStarted","Data":"c51e544eb36e9b033c19be883b90a4648e573e5f9734abe9efb27633b6f10b4d"}
Nov 28 10:03:47 crc kubenswrapper[4838]: I1128 10:03:47.383357 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n4cg9" event={"ID":"9afd2484-54f9-4dd9-b081-6537c075864f","Type":"ContainerStarted","Data":"097dd456e0c3ef677b5840fc1661a6e1456bfc3e8134aa04f3c49f8119c1a598"}
Nov 28 10:03:47 crc kubenswrapper[4838]: I1128 10:03:47.401273 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-cmcj8" podStartSLOduration=2.929864936 podStartE2EDuration="5.401252042s" podCreationTimestamp="2025-11-28 10:03:42 +0000 UTC" firstStartedPulling="2025-11-28 10:03:44.330293186 +0000 UTC m=+396.029267356" lastFinishedPulling="2025-11-28 10:03:46.801680242 +0000 UTC m=+398.500654462" observedRunningTime="2025-11-28 10:03:47.392414175 +0000 UTC m=+399.091388385" watchObservedRunningTime="2025-11-28 10:03:47.401252042 +0000 UTC m=+399.100226222"
Nov 28 10:03:47 crc kubenswrapper[4838]: I1128 10:03:47.415130 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-n4cg9" podStartSLOduration=2.6776711989999997 podStartE2EDuration="4.415104594s" podCreationTimestamp="2025-11-28 10:03:43 +0000 UTC" firstStartedPulling="2025-11-28 10:03:45.351727632 +0000 UTC m=+397.050701802" lastFinishedPulling="2025-11-28 10:03:47.089161027 +0000 UTC m=+398.788135197" observedRunningTime="2025-11-28 10:03:47.413517112 +0000 UTC m=+399.112491292" watchObservedRunningTime="2025-11-28 10:03:47.415104594 +0000 UTC m=+399.114078804"
Nov 28 10:03:49 crc kubenswrapper[4838]: I1128 10:03:49.400197 4838 generic.go:334] "Generic (PLEG): container finished" podID="c38d1237-07ae-448a-9a53-5432a944fd83" containerID="9a68ede70e217d88ef73325ae7ffae3b6d0150f8501ba6dff990bc5cbfedd8c5" exitCode=0
Nov 28 10:03:49 crc kubenswrapper[4838]: I1128 10:03:49.400318 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j64zj" event={"ID":"c38d1237-07ae-448a-9a53-5432a944fd83","Type":"ContainerDied","Data":"9a68ede70e217d88ef73325ae7ffae3b6d0150f8501ba6dff990bc5cbfedd8c5"}
Nov 28 10:03:49 crc kubenswrapper[4838]: I1128 10:03:49.787818 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw"]
Nov 28 10:03:49 crc kubenswrapper[4838]: I1128 10:03:49.788167 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" podUID="3d21d238-2cf2-4b63-9fec-2babbc832c4f" containerName="controller-manager" containerID="cri-o://5a854e952c6c657be661d2710d52246df8873b12ea47a750c25bea6137b102b7" gracePeriod=30
Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.303893 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw"
Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.409784 4838 generic.go:334] "Generic (PLEG): container finished" podID="3d21d238-2cf2-4b63-9fec-2babbc832c4f" containerID="5a854e952c6c657be661d2710d52246df8873b12ea47a750c25bea6137b102b7" exitCode=0
Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.409868 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" event={"ID":"3d21d238-2cf2-4b63-9fec-2babbc832c4f","Type":"ContainerDied","Data":"5a854e952c6c657be661d2710d52246df8873b12ea47a750c25bea6137b102b7"}
Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.409900 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw" event={"ID":"3d21d238-2cf2-4b63-9fec-2babbc832c4f","Type":"ContainerDied","Data":"ec80a4acc13c4222900a584fd2d1fe6b55284257f44e1b176c2b649a0323d75f"}
Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.409923 4838 scope.go:117] "RemoveContainer" containerID="5a854e952c6c657be661d2710d52246df8873b12ea47a750c25bea6137b102b7"
Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.410080 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw"
Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.412151 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j64zj" event={"ID":"c38d1237-07ae-448a-9a53-5432a944fd83","Type":"ContainerStarted","Data":"84094392c62062114a0641309717e19e888aed8c4799e3b65d0b9eb6231df2e6"}
Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.413255 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3d21d238-2cf2-4b63-9fec-2babbc832c4f-proxy-ca-bundles\") pod \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\" (UID: \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\") "
Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.413613 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d21d238-2cf2-4b63-9fec-2babbc832c4f-config\") pod \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\" (UID: \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\") "
Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.413659 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bls6k\" (UniqueName: \"kubernetes.io/projected/3d21d238-2cf2-4b63-9fec-2babbc832c4f-kube-api-access-bls6k\") pod \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\" (UID: \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\") "
Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.413741 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3d21d238-2cf2-4b63-9fec-2babbc832c4f-client-ca\") pod \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\" (UID: \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\") "
Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.413808 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3d21d238-2cf2-4b63-9fec-2babbc832c4f-serving-cert\") pod \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\" (UID: \"3d21d238-2cf2-4b63-9fec-2babbc832c4f\") "
Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.414409 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d21d238-2cf2-4b63-9fec-2babbc832c4f-config" (OuterVolumeSpecName: "config") pod "3d21d238-2cf2-4b63-9fec-2babbc832c4f" (UID: "3d21d238-2cf2-4b63-9fec-2babbc832c4f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.414413 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d21d238-2cf2-4b63-9fec-2babbc832c4f-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "3d21d238-2cf2-4b63-9fec-2babbc832c4f" (UID: "3d21d238-2cf2-4b63-9fec-2babbc832c4f"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.414753 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d21d238-2cf2-4b63-9fec-2babbc832c4f-client-ca" (OuterVolumeSpecName: "client-ca") pod "3d21d238-2cf2-4b63-9fec-2babbc832c4f" (UID: "3d21d238-2cf2-4b63-9fec-2babbc832c4f"). InnerVolumeSpecName "client-ca".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.427918 4838 scope.go:117] "RemoveContainer" containerID="5a854e952c6c657be661d2710d52246df8873b12ea47a750c25bea6137b102b7" Nov 28 10:03:50 crc kubenswrapper[4838]: E1128 10:03:50.430922 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a854e952c6c657be661d2710d52246df8873b12ea47a750c25bea6137b102b7\": container with ID starting with 5a854e952c6c657be661d2710d52246df8873b12ea47a750c25bea6137b102b7 not found: ID does not exist" containerID="5a854e952c6c657be661d2710d52246df8873b12ea47a750c25bea6137b102b7" Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.430977 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a854e952c6c657be661d2710d52246df8873b12ea47a750c25bea6137b102b7"} err="failed to get container status \"5a854e952c6c657be661d2710d52246df8873b12ea47a750c25bea6137b102b7\": rpc error: code = NotFound desc = could not find container \"5a854e952c6c657be661d2710d52246df8873b12ea47a750c25bea6137b102b7\": container with ID starting with 5a854e952c6c657be661d2710d52246df8873b12ea47a750c25bea6137b102b7 not found: ID does not exist" Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.431164 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-j64zj" podStartSLOduration=2.920316499 podStartE2EDuration="6.431149455s" podCreationTimestamp="2025-11-28 10:03:44 +0000 UTC" firstStartedPulling="2025-11-28 10:03:46.36469044 +0000 UTC m=+398.063664610" lastFinishedPulling="2025-11-28 10:03:49.875523396 +0000 UTC m=+401.574497566" observedRunningTime="2025-11-28 10:03:50.431129284 +0000 UTC m=+402.130103454" watchObservedRunningTime="2025-11-28 10:03:50.431149455 +0000 UTC m=+402.130123635" Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.434893 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d21d238-2cf2-4b63-9fec-2babbc832c4f-kube-api-access-bls6k" (OuterVolumeSpecName: "kube-api-access-bls6k") pod "3d21d238-2cf2-4b63-9fec-2babbc832c4f" (UID: "3d21d238-2cf2-4b63-9fec-2babbc832c4f"). InnerVolumeSpecName "kube-api-access-bls6k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.436097 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d21d238-2cf2-4b63-9fec-2babbc832c4f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "3d21d238-2cf2-4b63-9fec-2babbc832c4f" (UID: "3d21d238-2cf2-4b63-9fec-2babbc832c4f"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.515477 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d21d238-2cf2-4b63-9fec-2babbc832c4f-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.515528 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bls6k\" (UniqueName: \"kubernetes.io/projected/3d21d238-2cf2-4b63-9fec-2babbc832c4f-kube-api-access-bls6k\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.515551 4838 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3d21d238-2cf2-4b63-9fec-2babbc832c4f-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.515568 4838 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3d21d238-2cf2-4b63-9fec-2babbc832c4f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.515583 4838 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3d21d238-2cf2-4b63-9fec-2babbc832c4f-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.727340 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw"] Nov 28 10:03:50 crc kubenswrapper[4838]: I1128 10:03:50.729981 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-7dc98d9d7-kdgkw"] Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.051255 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-97dccc565-wc8l2"] Nov 28 10:03:51 crc kubenswrapper[4838]: E1128 10:03:51.051492 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d21d238-2cf2-4b63-9fec-2babbc832c4f" containerName="controller-manager" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.051509 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d21d238-2cf2-4b63-9fec-2babbc832c4f" containerName="controller-manager" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.051635 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d21d238-2cf2-4b63-9fec-2babbc832c4f" containerName="controller-manager" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.052097 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-97dccc565-wc8l2" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.054313 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.054402 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.054944 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.055991 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.056317 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.060773 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.062208 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-97dccc565-wc8l2"] Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.092391 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.223591 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1e7a2009-fd3b-4f22-9b63-781bda9624fe-proxy-ca-bundles\") pod \"controller-manager-97dccc565-wc8l2\" (UID: \"1e7a2009-fd3b-4f22-9b63-781bda9624fe\") " pod="openshift-controller-manager/controller-manager-97dccc565-wc8l2" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.223659 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qt6rr\" (UniqueName: \"kubernetes.io/projected/1e7a2009-fd3b-4f22-9b63-781bda9624fe-kube-api-access-qt6rr\") pod \"controller-manager-97dccc565-wc8l2\" (UID: \"1e7a2009-fd3b-4f22-9b63-781bda9624fe\") " pod="openshift-controller-manager/controller-manager-97dccc565-wc8l2" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.223755 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e7a2009-fd3b-4f22-9b63-781bda9624fe-config\") pod \"controller-manager-97dccc565-wc8l2\" (UID: \"1e7a2009-fd3b-4f22-9b63-781bda9624fe\") " pod="openshift-controller-manager/controller-manager-97dccc565-wc8l2" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.223833 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1e7a2009-fd3b-4f22-9b63-781bda9624fe-serving-cert\") pod \"controller-manager-97dccc565-wc8l2\" (UID: \"1e7a2009-fd3b-4f22-9b63-781bda9624fe\") " pod="openshift-controller-manager/controller-manager-97dccc565-wc8l2" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.223887 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/1e7a2009-fd3b-4f22-9b63-781bda9624fe-client-ca\") pod \"controller-manager-97dccc565-wc8l2\" (UID: \"1e7a2009-fd3b-4f22-9b63-781bda9624fe\") " pod="openshift-controller-manager/controller-manager-97dccc565-wc8l2" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.325333 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1e7a2009-fd3b-4f22-9b63-781bda9624fe-serving-cert\") pod \"controller-manager-97dccc565-wc8l2\" (UID: \"1e7a2009-fd3b-4f22-9b63-781bda9624fe\") " pod="openshift-controller-manager/controller-manager-97dccc565-wc8l2" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.325400 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1e7a2009-fd3b-4f22-9b63-781bda9624fe-client-ca\") pod \"controller-manager-97dccc565-wc8l2\" (UID: \"1e7a2009-fd3b-4f22-9b63-781bda9624fe\") " pod="openshift-controller-manager/controller-manager-97dccc565-wc8l2" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.325479 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1e7a2009-fd3b-4f22-9b63-781bda9624fe-proxy-ca-bundles\") pod \"controller-manager-97dccc565-wc8l2\" (UID: \"1e7a2009-fd3b-4f22-9b63-781bda9624fe\") " pod="openshift-controller-manager/controller-manager-97dccc565-wc8l2" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.325517 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qt6rr\" (UniqueName: \"kubernetes.io/projected/1e7a2009-fd3b-4f22-9b63-781bda9624fe-kube-api-access-qt6rr\") pod \"controller-manager-97dccc565-wc8l2\" (UID: \"1e7a2009-fd3b-4f22-9b63-781bda9624fe\") " pod="openshift-controller-manager/controller-manager-97dccc565-wc8l2" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.325563 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e7a2009-fd3b-4f22-9b63-781bda9624fe-config\") pod \"controller-manager-97dccc565-wc8l2\" (UID: \"1e7a2009-fd3b-4f22-9b63-781bda9624fe\") " pod="openshift-controller-manager/controller-manager-97dccc565-wc8l2" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.326622 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1e7a2009-fd3b-4f22-9b63-781bda9624fe-client-ca\") pod \"controller-manager-97dccc565-wc8l2\" (UID: \"1e7a2009-fd3b-4f22-9b63-781bda9624fe\") " pod="openshift-controller-manager/controller-manager-97dccc565-wc8l2" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.326645 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1e7a2009-fd3b-4f22-9b63-781bda9624fe-proxy-ca-bundles\") pod \"controller-manager-97dccc565-wc8l2\" (UID: \"1e7a2009-fd3b-4f22-9b63-781bda9624fe\") " pod="openshift-controller-manager/controller-manager-97dccc565-wc8l2" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.327241 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e7a2009-fd3b-4f22-9b63-781bda9624fe-config\") pod \"controller-manager-97dccc565-wc8l2\" (UID: \"1e7a2009-fd3b-4f22-9b63-781bda9624fe\") " pod="openshift-controller-manager/controller-manager-97dccc565-wc8l2" Nov 28 10:03:51 
crc kubenswrapper[4838]: I1128 10:03:51.329407 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1e7a2009-fd3b-4f22-9b63-781bda9624fe-serving-cert\") pod \"controller-manager-97dccc565-wc8l2\" (UID: \"1e7a2009-fd3b-4f22-9b63-781bda9624fe\") " pod="openshift-controller-manager/controller-manager-97dccc565-wc8l2" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.361768 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qt6rr\" (UniqueName: \"kubernetes.io/projected/1e7a2009-fd3b-4f22-9b63-781bda9624fe-kube-api-access-qt6rr\") pod \"controller-manager-97dccc565-wc8l2\" (UID: \"1e7a2009-fd3b-4f22-9b63-781bda9624fe\") " pod="openshift-controller-manager/controller-manager-97dccc565-wc8l2" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.399872 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-97dccc565-wc8l2" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.718009 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-trs27" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.718386 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-trs27" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.764701 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-trs27" Nov 28 10:03:51 crc kubenswrapper[4838]: I1128 10:03:51.819344 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-97dccc565-wc8l2"] Nov 28 10:03:51 crc kubenswrapper[4838]: W1128 10:03:51.825318 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1e7a2009_fd3b_4f22_9b63_781bda9624fe.slice/crio-280e85df119ee46eeaad47d0972fe8767c78b5a81c202e0e6933d7f882bbaacf WatchSource:0}: Error finding container 280e85df119ee46eeaad47d0972fe8767c78b5a81c202e0e6933d7f882bbaacf: Status 404 returned error can't find the container with id 280e85df119ee46eeaad47d0972fe8767c78b5a81c202e0e6933d7f882bbaacf Nov 28 10:03:52 crc kubenswrapper[4838]: I1128 10:03:52.426932 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-97dccc565-wc8l2" event={"ID":"1e7a2009-fd3b-4f22-9b63-781bda9624fe","Type":"ContainerStarted","Data":"b7e91feaf3e61c5d4dca7319b78e511d2fdbdaa229b6d8fd7753e724e3e86b66"} Nov 28 10:03:52 crc kubenswrapper[4838]: I1128 10:03:52.427165 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-97dccc565-wc8l2" event={"ID":"1e7a2009-fd3b-4f22-9b63-781bda9624fe","Type":"ContainerStarted","Data":"280e85df119ee46eeaad47d0972fe8767c78b5a81c202e0e6933d7f882bbaacf"} Nov 28 10:03:52 crc kubenswrapper[4838]: I1128 10:03:52.449420 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-97dccc565-wc8l2" podStartSLOduration=3.449401074 podStartE2EDuration="3.449401074s" podCreationTimestamp="2025-11-28 10:03:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:03:52.446134486 +0000 UTC m=+404.145108656" watchObservedRunningTime="2025-11-28 
10:03:52.449401074 +0000 UTC m=+404.148375244" Nov 28 10:03:52 crc kubenswrapper[4838]: I1128 10:03:52.468170 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-trs27" Nov 28 10:03:52 crc kubenswrapper[4838]: I1128 10:03:52.568877 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d21d238-2cf2-4b63-9fec-2babbc832c4f" path="/var/lib/kubelet/pods/3d21d238-2cf2-4b63-9fec-2babbc832c4f/volumes" Nov 28 10:03:52 crc kubenswrapper[4838]: I1128 10:03:52.700430 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-cmcj8" Nov 28 10:03:52 crc kubenswrapper[4838]: I1128 10:03:52.700495 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-cmcj8" Nov 28 10:03:52 crc kubenswrapper[4838]: I1128 10:03:52.746502 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-cmcj8" Nov 28 10:03:53 crc kubenswrapper[4838]: I1128 10:03:53.434982 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-97dccc565-wc8l2" Nov 28 10:03:53 crc kubenswrapper[4838]: I1128 10:03:53.440020 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-97dccc565-wc8l2" Nov 28 10:03:53 crc kubenswrapper[4838]: I1128 10:03:53.495106 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-cmcj8" Nov 28 10:03:53 crc kubenswrapper[4838]: I1128 10:03:53.940369 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:03:53 crc kubenswrapper[4838]: I1128 10:03:53.940419 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:03:54 crc kubenswrapper[4838]: I1128 10:03:54.112527 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-n4cg9" Nov 28 10:03:54 crc kubenswrapper[4838]: I1128 10:03:54.112726 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-n4cg9" Nov 28 10:03:54 crc kubenswrapper[4838]: I1128 10:03:54.160168 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-n4cg9" Nov 28 10:03:54 crc kubenswrapper[4838]: I1128 10:03:54.507974 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-n4cg9" Nov 28 10:03:55 crc kubenswrapper[4838]: I1128 10:03:55.110540 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-j64zj" Nov 28 10:03:55 crc kubenswrapper[4838]: I1128 10:03:55.110598 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-j64zj" Nov 28 10:03:55 crc 
kubenswrapper[4838]: I1128 10:03:55.157982 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-j64zj" Nov 28 10:03:55 crc kubenswrapper[4838]: I1128 10:03:55.485391 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-j64zj" Nov 28 10:03:59 crc kubenswrapper[4838]: I1128 10:03:59.346788 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-kzkzk" Nov 28 10:03:59 crc kubenswrapper[4838]: I1128 10:03:59.424634 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-8b7z5"] Nov 28 10:04:23 crc kubenswrapper[4838]: I1128 10:04:23.940652 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:04:23 crc kubenswrapper[4838]: I1128 10:04:23.941456 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:04:24 crc kubenswrapper[4838]: I1128 10:04:24.479006 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" podUID="6a71181f-49a4-4b69-a3e6-2413929b81dc" containerName="registry" containerID="cri-o://c8e3c4c42af8dce8330d366d20ec3dbab413107d5610ec798984050696aae641" gracePeriod=30 Nov 28 10:04:24 crc kubenswrapper[4838]: I1128 10:04:24.657913 4838 generic.go:334] "Generic (PLEG): container finished" podID="6a71181f-49a4-4b69-a3e6-2413929b81dc" containerID="c8e3c4c42af8dce8330d366d20ec3dbab413107d5610ec798984050696aae641" exitCode=0 Nov 28 10:04:24 crc kubenswrapper[4838]: I1128 10:04:24.657985 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" event={"ID":"6a71181f-49a4-4b69-a3e6-2413929b81dc","Type":"ContainerDied","Data":"c8e3c4c42af8dce8330d366d20ec3dbab413107d5610ec798984050696aae641"} Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.033265 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.162494 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6a71181f-49a4-4b69-a3e6-2413929b81dc-registry-tls\") pod \"6a71181f-49a4-4b69-a3e6-2413929b81dc\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.162597 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6a71181f-49a4-4b69-a3e6-2413929b81dc-installation-pull-secrets\") pod \"6a71181f-49a4-4b69-a3e6-2413929b81dc\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.162689 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6a71181f-49a4-4b69-a3e6-2413929b81dc-registry-certificates\") pod \"6a71181f-49a4-4b69-a3e6-2413929b81dc\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.162767 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6a71181f-49a4-4b69-a3e6-2413929b81dc-ca-trust-extracted\") pod \"6a71181f-49a4-4b69-a3e6-2413929b81dc\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.162816 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6a71181f-49a4-4b69-a3e6-2413929b81dc-trusted-ca\") pod \"6a71181f-49a4-4b69-a3e6-2413929b81dc\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.162870 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6a71181f-49a4-4b69-a3e6-2413929b81dc-bound-sa-token\") pod \"6a71181f-49a4-4b69-a3e6-2413929b81dc\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.162933 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hqg6v\" (UniqueName: \"kubernetes.io/projected/6a71181f-49a4-4b69-a3e6-2413929b81dc-kube-api-access-hqg6v\") pod \"6a71181f-49a4-4b69-a3e6-2413929b81dc\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.163221 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"6a71181f-49a4-4b69-a3e6-2413929b81dc\" (UID: \"6a71181f-49a4-4b69-a3e6-2413929b81dc\") " Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.163963 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a71181f-49a4-4b69-a3e6-2413929b81dc-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "6a71181f-49a4-4b69-a3e6-2413929b81dc" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.164021 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a71181f-49a4-4b69-a3e6-2413929b81dc-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "6a71181f-49a4-4b69-a3e6-2413929b81dc" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.172770 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a71181f-49a4-4b69-a3e6-2413929b81dc-kube-api-access-hqg6v" (OuterVolumeSpecName: "kube-api-access-hqg6v") pod "6a71181f-49a4-4b69-a3e6-2413929b81dc" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc"). InnerVolumeSpecName "kube-api-access-hqg6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.173234 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a71181f-49a4-4b69-a3e6-2413929b81dc-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "6a71181f-49a4-4b69-a3e6-2413929b81dc" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.173364 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a71181f-49a4-4b69-a3e6-2413929b81dc-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "6a71181f-49a4-4b69-a3e6-2413929b81dc" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.175262 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a71181f-49a4-4b69-a3e6-2413929b81dc-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "6a71181f-49a4-4b69-a3e6-2413929b81dc" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.177596 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "6a71181f-49a4-4b69-a3e6-2413929b81dc" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.201337 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6a71181f-49a4-4b69-a3e6-2413929b81dc-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "6a71181f-49a4-4b69-a3e6-2413929b81dc" (UID: "6a71181f-49a4-4b69-a3e6-2413929b81dc"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.265190 4838 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6a71181f-49a4-4b69-a3e6-2413929b81dc-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.265240 4838 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6a71181f-49a4-4b69-a3e6-2413929b81dc-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.265262 4838 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6a71181f-49a4-4b69-a3e6-2413929b81dc-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.265279 4838 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6a71181f-49a4-4b69-a3e6-2413929b81dc-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.265297 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hqg6v\" (UniqueName: \"kubernetes.io/projected/6a71181f-49a4-4b69-a3e6-2413929b81dc-kube-api-access-hqg6v\") on node \"crc\" DevicePath \"\"" Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.265316 4838 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6a71181f-49a4-4b69-a3e6-2413929b81dc-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.265333 4838 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6a71181f-49a4-4b69-a3e6-2413929b81dc-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.667241 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" event={"ID":"6a71181f-49a4-4b69-a3e6-2413929b81dc","Type":"ContainerDied","Data":"27b62c4fa8d7c8c5399493520456896c69662d33b5a028f8ba42508168f4ce8f"} Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.667293 4838 scope.go:117] "RemoveContainer" containerID="c8e3c4c42af8dce8330d366d20ec3dbab413107d5610ec798984050696aae641" Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.667324 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-8b7z5" Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.711954 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-8b7z5"] Nov 28 10:04:25 crc kubenswrapper[4838]: I1128 10:04:25.715944 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-8b7z5"] Nov 28 10:04:26 crc kubenswrapper[4838]: I1128 10:04:26.574300 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a71181f-49a4-4b69-a3e6-2413929b81dc" path="/var/lib/kubelet/pods/6a71181f-49a4-4b69-a3e6-2413929b81dc/volumes" Nov 28 10:04:53 crc kubenswrapper[4838]: I1128 10:04:53.941556 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:04:53 crc kubenswrapper[4838]: I1128 10:04:53.942445 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:04:53 crc kubenswrapper[4838]: I1128 10:04:53.942501 4838 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 10:04:53 crc kubenswrapper[4838]: I1128 10:04:53.943209 4838 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b32d00e2222988bb06c20222520ec2748f8cbe0ff2a2fb2b4f993227ebd10de6"} pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 10:04:53 crc kubenswrapper[4838]: I1128 10:04:53.943282 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" containerID="cri-o://b32d00e2222988bb06c20222520ec2748f8cbe0ff2a2fb2b4f993227ebd10de6" gracePeriod=600 Nov 28 10:04:54 crc kubenswrapper[4838]: I1128 10:04:54.882829 4838 generic.go:334] "Generic (PLEG): container finished" podID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerID="b32d00e2222988bb06c20222520ec2748f8cbe0ff2a2fb2b4f993227ebd10de6" exitCode=0 Nov 28 10:04:54 crc kubenswrapper[4838]: I1128 10:04:54.882907 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerDied","Data":"b32d00e2222988bb06c20222520ec2748f8cbe0ff2a2fb2b4f993227ebd10de6"} Nov 28 10:04:54 crc kubenswrapper[4838]: I1128 10:04:54.883567 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerStarted","Data":"1bdd37224d3ec9a4a6830c87550bdad8405cc7c49db8d4116d7a4a00dc4a3cf3"} Nov 28 10:04:54 crc kubenswrapper[4838]: I1128 10:04:54.883596 4838 scope.go:117] "RemoveContainer" 
containerID="5374d5a62ca21176826339023247946593cc1a7bdb4fb39596f12bf598790697" Nov 28 10:07:08 crc kubenswrapper[4838]: I1128 10:07:08.853297 4838 scope.go:117] "RemoveContainer" containerID="f0db1cd3861414b9fe53eb897c0ed154e2a22135be0e3be34adbeb76a16a56a1" Nov 28 10:07:08 crc kubenswrapper[4838]: I1128 10:07:08.882377 4838 scope.go:117] "RemoveContainer" containerID="0ea25c5002bbff70fed0fbf0c911698d78650d3ef0cc02b83426c721fb2e5b8c" Nov 28 10:07:23 crc kubenswrapper[4838]: I1128 10:07:23.940168 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:07:23 crc kubenswrapper[4838]: I1128 10:07:23.940802 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:07:53 crc kubenswrapper[4838]: I1128 10:07:53.939868 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:07:53 crc kubenswrapper[4838]: I1128 10:07:53.940625 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:08:08 crc kubenswrapper[4838]: I1128 10:08:08.939134 4838 scope.go:117] "RemoveContainer" containerID="36446f37d5c4cfd29f4db891c95a00b3319ba8e54c2136cec7bffc9c2e3f6555" Nov 28 10:08:08 crc kubenswrapper[4838]: I1128 10:08:08.972455 4838 scope.go:117] "RemoveContainer" containerID="c062455fc7b8ac59bdd8a3d3cbe4f31c3dab14cbc0e10712ff9eae9ea5248808" Nov 28 10:08:09 crc kubenswrapper[4838]: I1128 10:08:09.002949 4838 scope.go:117] "RemoveContainer" containerID="fec6538109d1a8179f976da0cc7c2718e61b255d0e758466f4e3c69baab2eacb" Nov 28 10:08:09 crc kubenswrapper[4838]: I1128 10:08:09.032431 4838 scope.go:117] "RemoveContainer" containerID="4ed370e04b7437cc2fc8e19ee2fd3cde7591bdb6b2a77b3cfb9192180d951d6c" Nov 28 10:08:23 crc kubenswrapper[4838]: I1128 10:08:23.940354 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:08:23 crc kubenswrapper[4838]: I1128 10:08:23.940914 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:08:23 crc kubenswrapper[4838]: I1128 10:08:23.940966 4838 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 10:08:23 crc kubenswrapper[4838]: I1128 10:08:23.941703 4838 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1bdd37224d3ec9a4a6830c87550bdad8405cc7c49db8d4116d7a4a00dc4a3cf3"} pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 10:08:23 crc kubenswrapper[4838]: I1128 10:08:23.941819 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" containerID="cri-o://1bdd37224d3ec9a4a6830c87550bdad8405cc7c49db8d4116d7a4a00dc4a3cf3" gracePeriod=600 Nov 28 10:08:24 crc kubenswrapper[4838]: I1128 10:08:24.291954 4838 generic.go:334] "Generic (PLEG): container finished" podID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerID="1bdd37224d3ec9a4a6830c87550bdad8405cc7c49db8d4116d7a4a00dc4a3cf3" exitCode=0 Nov 28 10:08:24 crc kubenswrapper[4838]: I1128 10:08:24.292316 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerDied","Data":"1bdd37224d3ec9a4a6830c87550bdad8405cc7c49db8d4116d7a4a00dc4a3cf3"} Nov 28 10:08:24 crc kubenswrapper[4838]: I1128 10:08:24.292356 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerStarted","Data":"e44c3d5f2db51d0905366ef1f77dd84ac3c3e496e157394cd047f58af85a3fca"} Nov 28 10:08:24 crc kubenswrapper[4838]: I1128 10:08:24.292383 4838 scope.go:117] "RemoveContainer" containerID="b32d00e2222988bb06c20222520ec2748f8cbe0ff2a2fb2b4f993227ebd10de6" Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.296522 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-pc7lt"] Nov 28 10:09:27 crc kubenswrapper[4838]: E1128 10:09:27.297879 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a71181f-49a4-4b69-a3e6-2413929b81dc" containerName="registry" Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.297948 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a71181f-49a4-4b69-a3e6-2413929b81dc" containerName="registry" Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.298083 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a71181f-49a4-4b69-a3e6-2413929b81dc" containerName="registry" Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.298483 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-pc7lt" Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.301505 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.301515 4838 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-lhr6d" Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.301571 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.305232 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-gcqpn"] Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.305865 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-gcqpn" Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.311295 4838 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-txndw" Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.320471 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-pc7lt"] Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.329984 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-gcqpn"] Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.342035 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-zldj2"] Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.342819 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-zldj2" Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.346738 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-zldj2"] Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.356647 4838 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-b2kfd" Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.402957 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nr8g7\" (UniqueName: \"kubernetes.io/projected/e850d813-cc68-49bd-aa4d-ab3271b36d41-kube-api-access-nr8g7\") pod \"cert-manager-5b446d88c5-gcqpn\" (UID: \"e850d813-cc68-49bd-aa4d-ab3271b36d41\") " pod="cert-manager/cert-manager-5b446d88c5-gcqpn" Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.403064 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62858\" (UniqueName: \"kubernetes.io/projected/34c85aea-53ac-4f8b-b4b7-a5262768ea9a-kube-api-access-62858\") pod \"cert-manager-cainjector-7f985d654d-pc7lt\" (UID: \"34c85aea-53ac-4f8b-b4b7-a5262768ea9a\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-pc7lt" Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.504365 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nr8g7\" (UniqueName: \"kubernetes.io/projected/e850d813-cc68-49bd-aa4d-ab3271b36d41-kube-api-access-nr8g7\") pod \"cert-manager-5b446d88c5-gcqpn\" (UID: \"e850d813-cc68-49bd-aa4d-ab3271b36d41\") " pod="cert-manager/cert-manager-5b446d88c5-gcqpn" Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 
10:09:27.505214 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xh4vm\" (UniqueName: \"kubernetes.io/projected/863773f7-97a9-4bcc-8c5d-86b5533f1c6b-kube-api-access-xh4vm\") pod \"cert-manager-webhook-5655c58dd6-zldj2\" (UID: \"863773f7-97a9-4bcc-8c5d-86b5533f1c6b\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-zldj2"
Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.505561 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62858\" (UniqueName: \"kubernetes.io/projected/34c85aea-53ac-4f8b-b4b7-a5262768ea9a-kube-api-access-62858\") pod \"cert-manager-cainjector-7f985d654d-pc7lt\" (UID: \"34c85aea-53ac-4f8b-b4b7-a5262768ea9a\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-pc7lt"
Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.537045 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62858\" (UniqueName: \"kubernetes.io/projected/34c85aea-53ac-4f8b-b4b7-a5262768ea9a-kube-api-access-62858\") pod \"cert-manager-cainjector-7f985d654d-pc7lt\" (UID: \"34c85aea-53ac-4f8b-b4b7-a5262768ea9a\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-pc7lt"
Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.539430 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nr8g7\" (UniqueName: \"kubernetes.io/projected/e850d813-cc68-49bd-aa4d-ab3271b36d41-kube-api-access-nr8g7\") pod \"cert-manager-5b446d88c5-gcqpn\" (UID: \"e850d813-cc68-49bd-aa4d-ab3271b36d41\") " pod="cert-manager/cert-manager-5b446d88c5-gcqpn"
Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.607549 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xh4vm\" (UniqueName: \"kubernetes.io/projected/863773f7-97a9-4bcc-8c5d-86b5533f1c6b-kube-api-access-xh4vm\") pod \"cert-manager-webhook-5655c58dd6-zldj2\" (UID: \"863773f7-97a9-4bcc-8c5d-86b5533f1c6b\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-zldj2"
Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.619674 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-pc7lt"
Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.627047 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xh4vm\" (UniqueName: \"kubernetes.io/projected/863773f7-97a9-4bcc-8c5d-86b5533f1c6b-kube-api-access-xh4vm\") pod \"cert-manager-webhook-5655c58dd6-zldj2\" (UID: \"863773f7-97a9-4bcc-8c5d-86b5533f1c6b\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-zldj2"
Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.632604 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-gcqpn"
Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.659296 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-zldj2"
Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.880521 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-pc7lt"]
Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.890810 4838 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 10:09:27 crc kubenswrapper[4838]: I1128 10:09:27.915483 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-zldj2"]
Nov 28 10:09:27 crc kubenswrapper[4838]: W1128 10:09:27.920022 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod863773f7_97a9_4bcc_8c5d_86b5533f1c6b.slice/crio-09d4d8586814a5d4b99d59add91912c49f83ac3fa7354524b24569f80b9e4136 WatchSource:0}: Error finding container 09d4d8586814a5d4b99d59add91912c49f83ac3fa7354524b24569f80b9e4136: Status 404 returned error can't find the container with id 09d4d8586814a5d4b99d59add91912c49f83ac3fa7354524b24569f80b9e4136
Nov 28 10:09:28 crc kubenswrapper[4838]: I1128 10:09:28.055502 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-gcqpn"]
Nov 28 10:09:28 crc kubenswrapper[4838]: W1128 10:09:28.060817 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode850d813_cc68_49bd_aa4d_ab3271b36d41.slice/crio-5095bdf073e97fbac35b9788b70b38159d4646e9e17c7c7d41f41dedd0fcffbc WatchSource:0}: Error finding container 5095bdf073e97fbac35b9788b70b38159d4646e9e17c7c7d41f41dedd0fcffbc: Status 404 returned error can't find the container with id 5095bdf073e97fbac35b9788b70b38159d4646e9e17c7c7d41f41dedd0fcffbc
Nov 28 10:09:28 crc kubenswrapper[4838]: I1128 10:09:28.706224 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-pc7lt" event={"ID":"34c85aea-53ac-4f8b-b4b7-a5262768ea9a","Type":"ContainerStarted","Data":"e5a49eb3bc7951bfeb791e8aed77a598c3bcb67e275e8c2272f4f6a8163f0209"}
Nov 28 10:09:28 crc kubenswrapper[4838]: I1128 10:09:28.707594 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-zldj2" event={"ID":"863773f7-97a9-4bcc-8c5d-86b5533f1c6b","Type":"ContainerStarted","Data":"09d4d8586814a5d4b99d59add91912c49f83ac3fa7354524b24569f80b9e4136"}
Nov 28 10:09:28 crc kubenswrapper[4838]: I1128 10:09:28.708549 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-gcqpn" event={"ID":"e850d813-cc68-49bd-aa4d-ab3271b36d41","Type":"ContainerStarted","Data":"5095bdf073e97fbac35b9788b70b38159d4646e9e17c7c7d41f41dedd0fcffbc"}
Nov 28 10:09:30 crc kubenswrapper[4838]: I1128 10:09:30.721694 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-zldj2" event={"ID":"863773f7-97a9-4bcc-8c5d-86b5533f1c6b","Type":"ContainerStarted","Data":"837bd05163d75ed4322923392c3fe0b5a93b3ac32e4ec24b7484ca72ec631aaf"}
Nov 28 10:09:30 crc kubenswrapper[4838]: I1128 10:09:30.722274 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-zldj2"
Nov 28 10:09:30 crc kubenswrapper[4838]: I1128 10:09:30.723321 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-pc7lt" event={"ID":"34c85aea-53ac-4f8b-b4b7-a5262768ea9a","Type":"ContainerStarted","Data":"bcf0508b9921e6ee8a0d5894e63f8e0e65e1fe82e158e6aab6fb956e7b5128b2"}
Nov 28 10:09:30 crc kubenswrapper[4838]: I1128 10:09:30.760341 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-zldj2" podStartSLOduration=1.164249829 podStartE2EDuration="3.760314651s" podCreationTimestamp="2025-11-28 10:09:27 +0000 UTC" firstStartedPulling="2025-11-28 10:09:27.922129906 +0000 UTC m=+739.621104066" lastFinishedPulling="2025-11-28 10:09:30.518194718 +0000 UTC m=+742.217168888" observedRunningTime="2025-11-28 10:09:30.74266239 +0000 UTC m=+742.441636560" watchObservedRunningTime="2025-11-28 10:09:30.760314651 +0000 UTC m=+742.459288821"
Nov 28 10:09:32 crc kubenswrapper[4838]: I1128 10:09:32.741837 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-gcqpn" event={"ID":"e850d813-cc68-49bd-aa4d-ab3271b36d41","Type":"ContainerStarted","Data":"57723fa6eb8a84fdd07f6e1979b6fd94ef81523f827c072d1d4ee20c952d5129"}
Nov 28 10:09:32 crc kubenswrapper[4838]: I1128 10:09:32.761601 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-pc7lt" podStartSLOduration=3.202411958 podStartE2EDuration="5.761582285s" podCreationTimestamp="2025-11-28 10:09:27 +0000 UTC" firstStartedPulling="2025-11-28 10:09:27.890533032 +0000 UTC m=+739.589507212" lastFinishedPulling="2025-11-28 10:09:30.449703329 +0000 UTC m=+742.148677539" observedRunningTime="2025-11-28 10:09:30.760803404 +0000 UTC m=+742.459777574" watchObservedRunningTime="2025-11-28 10:09:32.761582285 +0000 UTC m=+744.460556475"
Nov 28 10:09:32 crc kubenswrapper[4838]: I1128 10:09:32.764170 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-gcqpn" podStartSLOduration=1.93896098 podStartE2EDuration="5.764164464s" podCreationTimestamp="2025-11-28 10:09:27 +0000 UTC" firstStartedPulling="2025-11-28 10:09:28.062805081 +0000 UTC m=+739.761779251" lastFinishedPulling="2025-11-28 10:09:31.888008575 +0000 UTC m=+743.586982735" observedRunningTime="2025-11-28 10:09:32.759792698 +0000 UTC m=+744.458766878" watchObservedRunningTime="2025-11-28 10:09:32.764164464 +0000 UTC m=+744.463138644"
Nov 28 10:09:37 crc kubenswrapper[4838]: I1128 10:09:37.661534 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-zldj2"
Nov 28 10:09:37 crc kubenswrapper[4838]: I1128 10:09:37.819438 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-gmhsj"]
Nov 28 10:09:37 crc kubenswrapper[4838]: I1128 10:09:37.819798 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovn-controller" containerID="cri-o://669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72" gracePeriod=30
Nov 28 10:09:37 crc kubenswrapper[4838]: I1128 10:09:37.819918 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="northd" containerID="cri-o://5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146" gracePeriod=30
Nov 28 10:09:37 crc kubenswrapper[4838]: I1128 10:09:37.819991 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="nbdb" containerID="cri-o://b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776" gracePeriod=30
Nov 28 10:09:37 crc kubenswrapper[4838]: I1128 10:09:37.820072 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovn-acl-logging" containerID="cri-o://b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6" gracePeriod=30
Nov 28 10:09:37 crc kubenswrapper[4838]: I1128 10:09:37.820077 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="kube-rbac-proxy-node" containerID="cri-o://3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa" gracePeriod=30
Nov 28 10:09:37 crc kubenswrapper[4838]: I1128 10:09:37.820032 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="sbdb" containerID="cri-o://1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8" gracePeriod=30
Nov 28 10:09:37 crc kubenswrapper[4838]: I1128 10:09:37.819965 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa" gracePeriod=30
Nov 28 10:09:37 crc kubenswrapper[4838]: I1128 10:09:37.859586 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovnkube-controller" containerID="cri-o://7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d" gracePeriod=30
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.165904 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gmhsj_41b01f7d-5c75-49de-86f7-87e04bf71194/ovnkube-controller/3.log"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.169568 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gmhsj_41b01f7d-5c75-49de-86f7-87e04bf71194/ovn-acl-logging/0.log"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.170591 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gmhsj_41b01f7d-5c75-49de-86f7-87e04bf71194/ovn-controller/0.log"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.171295 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.224693 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-gsvx5"]
Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.224948 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovnkube-controller"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.224969 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovnkube-controller"
Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.224982 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="kubecfg-setup"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.224990 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="kubecfg-setup"
Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.225002 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovnkube-controller"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225012 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovnkube-controller"
Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.225023 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovn-controller"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225030 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovn-controller"
Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.225044 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovn-acl-logging"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225052 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovn-acl-logging"
Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.225062 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovnkube-controller"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225069 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovnkube-controller"
Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.225079 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="nbdb"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225089 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="nbdb"
Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.225106 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovnkube-controller"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225116 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovnkube-controller"
Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.225130 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="kube-rbac-proxy-node"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225139 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="kube-rbac-proxy-node"
Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.225150 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="sbdb"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225160 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="sbdb"
Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.225173 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="kube-rbac-proxy-ovn-metrics"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225184 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="kube-rbac-proxy-ovn-metrics"
Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.225196 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="northd"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225205 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="northd"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225360 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovnkube-controller"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225381 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovn-controller"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225391 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="kube-rbac-proxy-node"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225404 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="northd"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225414 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovnkube-controller"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225422 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovnkube-controller"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225430 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="sbdb"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225442 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="nbdb"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225453 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovn-acl-logging"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225461 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="kube-rbac-proxy-ovn-metrics"
Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.225593 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovnkube-controller"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225603 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovnkube-controller"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225708 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovnkube-controller"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.225728 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerName="ovnkube-controller"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.227728 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.262713 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-svlft\" (UniqueName: \"kubernetes.io/projected/41b01f7d-5c75-49de-86f7-87e04bf71194-kube-api-access-svlft\") pod \"41b01f7d-5c75-49de-86f7-87e04bf71194\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") "
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.262794 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/41b01f7d-5c75-49de-86f7-87e04bf71194-ovn-node-metrics-cert\") pod \"41b01f7d-5c75-49de-86f7-87e04bf71194\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") "
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.262822 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-run-ovn-kubernetes\") pod \"41b01f7d-5c75-49de-86f7-87e04bf71194\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") "
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.262861 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-run-systemd\") pod \"41b01f7d-5c75-49de-86f7-87e04bf71194\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") "
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.262895 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-etc-openvswitch\") pod \"41b01f7d-5c75-49de-86f7-87e04bf71194\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") "
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.262917 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/41b01f7d-5c75-49de-86f7-87e04bf71194-ovnkube-script-lib\") pod \"41b01f7d-5c75-49de-86f7-87e04bf71194\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") "
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.262941 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-run-netns\") pod \"41b01f7d-5c75-49de-86f7-87e04bf71194\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") "
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.262959 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-node-log\") pod \"41b01f7d-5c75-49de-86f7-87e04bf71194\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") "
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.262984 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-cni-bin\") pod \"41b01f7d-5c75-49de-86f7-87e04bf71194\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") "
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263004 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-run-ovn\") pod \"41b01f7d-5c75-49de-86f7-87e04bf71194\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") "
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.262993 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "41b01f7d-5c75-49de-86f7-87e04bf71194" (UID: "41b01f7d-5c75-49de-86f7-87e04bf71194"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263027 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-var-lib-cni-networks-ovn-kubernetes\") pod \"41b01f7d-5c75-49de-86f7-87e04bf71194\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") "
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263077 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "41b01f7d-5c75-49de-86f7-87e04bf71194" (UID: "41b01f7d-5c75-49de-86f7-87e04bf71194"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263115 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "41b01f7d-5c75-49de-86f7-87e04bf71194" (UID: "41b01f7d-5c75-49de-86f7-87e04bf71194"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263132 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/41b01f7d-5c75-49de-86f7-87e04bf71194-env-overrides\") pod \"41b01f7d-5c75-49de-86f7-87e04bf71194\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") "
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263173 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-systemd-units\") pod \"41b01f7d-5c75-49de-86f7-87e04bf71194\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") "
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263215 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-log-socket\") pod \"41b01f7d-5c75-49de-86f7-87e04bf71194\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") "
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263248 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-var-lib-openvswitch\") pod \"41b01f7d-5c75-49de-86f7-87e04bf71194\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") "
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263288 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-run-openvswitch\") pod \"41b01f7d-5c75-49de-86f7-87e04bf71194\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") "
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263318 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-slash\") pod \"41b01f7d-5c75-49de-86f7-87e04bf71194\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") "
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263351 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/41b01f7d-5c75-49de-86f7-87e04bf71194-ovnkube-config\") pod \"41b01f7d-5c75-49de-86f7-87e04bf71194\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") "
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263393 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-kubelet\") pod \"41b01f7d-5c75-49de-86f7-87e04bf71194\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") "
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263427 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-cni-netd\") pod \"41b01f7d-5c75-49de-86f7-87e04bf71194\" (UID: \"41b01f7d-5c75-49de-86f7-87e04bf71194\") "
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263527 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41b01f7d-5c75-49de-86f7-87e04bf71194-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "41b01f7d-5c75-49de-86f7-87e04bf71194" (UID: "41b01f7d-5c75-49de-86f7-87e04bf71194"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263568 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "41b01f7d-5c75-49de-86f7-87e04bf71194" (UID: "41b01f7d-5c75-49de-86f7-87e04bf71194"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263574 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "41b01f7d-5c75-49de-86f7-87e04bf71194" (UID: "41b01f7d-5c75-49de-86f7-87e04bf71194"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263593 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-node-log" (OuterVolumeSpecName: "node-log") pod "41b01f7d-5c75-49de-86f7-87e04bf71194" (UID: "41b01f7d-5c75-49de-86f7-87e04bf71194"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263611 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "41b01f7d-5c75-49de-86f7-87e04bf71194" (UID: "41b01f7d-5c75-49de-86f7-87e04bf71194"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263634 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "41b01f7d-5c75-49de-86f7-87e04bf71194" (UID: "41b01f7d-5c75-49de-86f7-87e04bf71194"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263656 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "41b01f7d-5c75-49de-86f7-87e04bf71194" (UID: "41b01f7d-5c75-49de-86f7-87e04bf71194"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263678 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "41b01f7d-5c75-49de-86f7-87e04bf71194" (UID: "41b01f7d-5c75-49de-86f7-87e04bf71194"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263687 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "41b01f7d-5c75-49de-86f7-87e04bf71194" (UID: "41b01f7d-5c75-49de-86f7-87e04bf71194"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263732 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-slash" (OuterVolumeSpecName: "host-slash") pod "41b01f7d-5c75-49de-86f7-87e04bf71194" (UID: "41b01f7d-5c75-49de-86f7-87e04bf71194"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263729 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "41b01f7d-5c75-49de-86f7-87e04bf71194" (UID: "41b01f7d-5c75-49de-86f7-87e04bf71194"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263774 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-log-socket" (OuterVolumeSpecName: "log-socket") pod "41b01f7d-5c75-49de-86f7-87e04bf71194" (UID: "41b01f7d-5c75-49de-86f7-87e04bf71194"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263785 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263816 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8d4958ad-5878-40f9-8259-e68a1525b59b-ovnkube-script-lib\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263845 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-node-log\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263870 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8d4958ad-5878-40f9-8259-e68a1525b59b-env-overrides\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263892 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-host-run-ovn-kubernetes\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263918 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-host-cni-bin\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263957 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-run-ovn\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.263980 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-host-cni-netd\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264002 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8d4958ad-5878-40f9-8259-e68a1525b59b-ovnkube-config\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264032 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41b01f7d-5c75-49de-86f7-87e04bf71194-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "41b01f7d-5c75-49de-86f7-87e04bf71194" (UID: "41b01f7d-5c75-49de-86f7-87e04bf71194"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264194 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41b01f7d-5c75-49de-86f7-87e04bf71194-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "41b01f7d-5c75-49de-86f7-87e04bf71194" (UID: "41b01f7d-5c75-49de-86f7-87e04bf71194"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264072 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-log-socket\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264340 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-run-systemd\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264377 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-host-kubelet\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264401 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8d4958ad-5878-40f9-8259-e68a1525b59b-ovn-node-metrics-cert\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264421 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-var-lib-openvswitch\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264450 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zf6bl\" (UniqueName: \"kubernetes.io/projected/8d4958ad-5878-40f9-8259-e68a1525b59b-kube-api-access-zf6bl\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264474 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-etc-openvswitch\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264501 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-systemd-units\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264535 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-host-run-netns\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264551 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-host-slash\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264592 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-run-openvswitch\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264661 4838 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-cni-bin\") on node \"crc\" DevicePath \"\""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264672 4838 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-run-ovn\") on node \"crc\" DevicePath \"\""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264683 4838 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264694 4838 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/41b01f7d-5c75-49de-86f7-87e04bf71194-env-overrides\") on node \"crc\" DevicePath \"\""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264704 4838 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-systemd-units\") on node \"crc\" DevicePath \"\""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264713 4838 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-log-socket\") on node \"crc\" DevicePath \"\""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264727 4838 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-var-lib-openvswitch\") on node \"crc\" DevicePath \"\""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264736 4838 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-run-openvswitch\") on node \"crc\" DevicePath \"\""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264757 4838 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-slash\") on node \"crc\" DevicePath \"\""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264765 4838 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/41b01f7d-5c75-49de-86f7-87e04bf71194-ovnkube-config\") on node \"crc\" DevicePath \"\""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264773 4838 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-kubelet\") on node \"crc\" DevicePath \"\""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.264781 4838 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-cni-netd\") on node \"crc\" DevicePath \"\""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.265471 4838 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.265485 4838 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-etc-openvswitch\") on node \"crc\" DevicePath \"\""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.265495 4838 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/41b01f7d-5c75-49de-86f7-87e04bf71194-ovnkube-script-lib\") on node \"crc\" DevicePath \"\""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.265503 4838 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-host-run-netns\") on node \"crc\" DevicePath \"\""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.265511 4838 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-node-log\") on node \"crc\" DevicePath \"\""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.268259 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41b01f7d-5c75-49de-86f7-87e04bf71194-kube-api-access-svlft" (OuterVolumeSpecName: "kube-api-access-svlft") pod "41b01f7d-5c75-49de-86f7-87e04bf71194" (UID: "41b01f7d-5c75-49de-86f7-87e04bf71194"). InnerVolumeSpecName "kube-api-access-svlft". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.270343 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41b01f7d-5c75-49de-86f7-87e04bf71194-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "41b01f7d-5c75-49de-86f7-87e04bf71194" (UID: "41b01f7d-5c75-49de-86f7-87e04bf71194"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.276368 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "41b01f7d-5c75-49de-86f7-87e04bf71194" (UID: "41b01f7d-5c75-49de-86f7-87e04bf71194"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367144 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-node-log\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367217 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8d4958ad-5878-40f9-8259-e68a1525b59b-env-overrides\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367255 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-host-run-ovn-kubernetes\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367291 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-host-cni-bin\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367301 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-node-log\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367340 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-run-ovn\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367391 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-host-run-ovn-kubernetes\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367420 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-host-cni-bin\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367439 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-host-cni-netd\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367414 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-host-cni-netd\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367404 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-run-ovn\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367487 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8d4958ad-5878-40f9-8259-e68a1525b59b-ovnkube-config\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367517 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-log-socket\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367541 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-run-systemd\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367560 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-host-kubelet\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367579 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8d4958ad-5878-40f9-8259-e68a1525b59b-ovn-node-metrics-cert\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367597 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-var-lib-openvswitch\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367619 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-run-systemd\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367620 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zf6bl\" (UniqueName: \"kubernetes.io/projected/8d4958ad-5878-40f9-8259-e68a1525b59b-kube-api-access-zf6bl\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367670 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-etc-openvswitch\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367710 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-systemd-units\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367768 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-host-run-netns\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367763 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-var-lib-openvswitch\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367798 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-host-slash\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367828 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-host-slash\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367860 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-etc-openvswitch\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367860 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-run-openvswitch\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367896 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-run-openvswitch\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367923 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-systemd-units\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367926 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367945 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-host-run-netns\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367960 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-host-kubelet\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367963 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8d4958ad-5878-40f9-8259-e68a1525b59b-ovnkube-script-lib\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.367995 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.368022 4838 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/41b01f7d-5c75-49de-86f7-87e04bf71194-run-systemd\") on node \"crc\" DevicePath \"\""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.368037 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/8d4958ad-5878-40f9-8259-e68a1525b59b-log-socket\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.368041 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-svlft\" (UniqueName: \"kubernetes.io/projected/41b01f7d-5c75-49de-86f7-87e04bf71194-kube-api-access-svlft\") on node \"crc\" DevicePath \"\""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.368082 4838 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/41b01f7d-5c75-49de-86f7-87e04bf71194-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\""
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.368316 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8d4958ad-5878-40f9-8259-e68a1525b59b-env-overrides\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.368469 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8d4958ad-5878-40f9-8259-e68a1525b59b-ovnkube-config\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.368629 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8d4958ad-5878-40f9-8259-e68a1525b59b-ovnkube-script-lib\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.371038 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8d4958ad-5878-40f9-8259-e68a1525b59b-ovn-node-metrics-cert\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.382781 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zf6bl\" (UniqueName: \"kubernetes.io/projected/8d4958ad-5878-40f9-8259-e68a1525b59b-kube-api-access-zf6bl\") pod \"ovnkube-node-gsvx5\" (UID: \"8d4958ad-5878-40f9-8259-e68a1525b59b\") " pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.548940 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.791147 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gmhsj_41b01f7d-5c75-49de-86f7-87e04bf71194/ovnkube-controller/3.log"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.793065 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gmhsj_41b01f7d-5c75-49de-86f7-87e04bf71194/ovn-acl-logging/0.log"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.793388 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-gmhsj_41b01f7d-5c75-49de-86f7-87e04bf71194/ovn-controller/0.log"
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794014 4838 generic.go:334] "Generic (PLEG): container finished" podID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerID="7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d" exitCode=0
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794037 4838 generic.go:334] "Generic (PLEG): container finished" podID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerID="1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8" exitCode=0
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794047 4838 generic.go:334] "Generic (PLEG): container finished" podID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerID="b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776" exitCode=0
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794057 4838 generic.go:334] "Generic (PLEG): container finished" podID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerID="5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146" exitCode=0
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794066 4838 generic.go:334] "Generic (PLEG): container finished" podID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerID="87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa" exitCode=0
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794074 4838 generic.go:334] "Generic (PLEG): container finished" podID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerID="3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa" exitCode=0
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794082 4838 generic.go:334] "Generic (PLEG): container finished" podID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerID="b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6" exitCode=143
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794091 4838 generic.go:334] "Generic (PLEG): container finished" podID="41b01f7d-5c75-49de-86f7-87e04bf71194" containerID="669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72" exitCode=143
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794134 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerDied","Data":"7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d"}
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794167 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerDied","Data":"1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8"}
Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794181 4838 kubelet.go:2453] "SyncLoop
(PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerDied","Data":"b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794195 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerDied","Data":"5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794206 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerDied","Data":"87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794218 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerDied","Data":"3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794230 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794240 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794246 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794254 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794261 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794267 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794273 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794279 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794286 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794295 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerDied","Data":"b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794305 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794313 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794320 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794327 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794334 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794341 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794347 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794354 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794360 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794366 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794375 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerDied","Data":"669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794386 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794394 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 
10:09:38.794400 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794407 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794413 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794421 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794427 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794434 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794440 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794446 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794456 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" event={"ID":"41b01f7d-5c75-49de-86f7-87e04bf71194","Type":"ContainerDied","Data":"427d9dfdc8a9866defe7ab2b5b761ead46fe46d3b13b79c04d4ee5af6525116a"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794466 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794474 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794480 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794487 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794494 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 
10:09:38.794500 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794507 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794514 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794520 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794527 4838 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.794613 4838 scope.go:117] "RemoveContainer" containerID="7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.795170 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-gmhsj" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.796286 4838 generic.go:334] "Generic (PLEG): container finished" podID="8d4958ad-5878-40f9-8259-e68a1525b59b" containerID="179b2a10cc29931beabc1938da34fb54d4e72cca30f34313d5fd856a41feea8a" exitCode=0 Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.796325 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5" event={"ID":"8d4958ad-5878-40f9-8259-e68a1525b59b","Type":"ContainerDied","Data":"179b2a10cc29931beabc1938da34fb54d4e72cca30f34313d5fd856a41feea8a"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.796349 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5" event={"ID":"8d4958ad-5878-40f9-8259-e68a1525b59b","Type":"ContainerStarted","Data":"66946fb43b3f88edc24ba3755f30de5034d2c8f23c8d92b56cdbb1b91eb1fdcb"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.798826 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4rv9b_051f7e1c-2d47-4be9-bbd5-14feec16eb16/kube-multus/2.log" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.799306 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4rv9b_051f7e1c-2d47-4be9-bbd5-14feec16eb16/kube-multus/1.log" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.799340 4838 generic.go:334] "Generic (PLEG): container finished" podID="051f7e1c-2d47-4be9-bbd5-14feec16eb16" containerID="9c246d0893eb62f4c097d470f8716546d2e91da8aed1554ef64709537d7d377d" exitCode=2 Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.799361 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4rv9b" event={"ID":"051f7e1c-2d47-4be9-bbd5-14feec16eb16","Type":"ContainerDied","Data":"9c246d0893eb62f4c097d470f8716546d2e91da8aed1554ef64709537d7d377d"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.799378 4838 pod_container_deletor.go:114] 
"Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"262d73384f8aa0c5e8405e70d091fbc0003217ee2c08a4776048649b9a6eda59"} Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.799671 4838 scope.go:117] "RemoveContainer" containerID="9c246d0893eb62f4c097d470f8716546d2e91da8aed1554ef64709537d7d377d" Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.799867 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-4rv9b_openshift-multus(051f7e1c-2d47-4be9-bbd5-14feec16eb16)\"" pod="openshift-multus/multus-4rv9b" podUID="051f7e1c-2d47-4be9-bbd5-14feec16eb16" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.810964 4838 scope.go:117] "RemoveContainer" containerID="71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.825249 4838 scope.go:117] "RemoveContainer" containerID="1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.841346 4838 scope.go:117] "RemoveContainer" containerID="b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.858177 4838 scope.go:117] "RemoveContainer" containerID="5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.864139 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-gmhsj"] Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.870607 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-gmhsj"] Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.883582 4838 scope.go:117] "RemoveContainer" containerID="87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.898576 4838 scope.go:117] "RemoveContainer" containerID="3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.918543 4838 scope.go:117] "RemoveContainer" containerID="b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.933956 4838 scope.go:117] "RemoveContainer" containerID="669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.952990 4838 scope.go:117] "RemoveContainer" containerID="9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.972315 4838 scope.go:117] "RemoveContainer" containerID="7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d" Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.972677 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d\": container with ID starting with 7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d not found: ID does not exist" containerID="7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.972707 4838 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d"} err="failed to get container status \"7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d\": rpc error: code = NotFound desc = could not find container \"7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d\": container with ID starting with 7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.972741 4838 scope.go:117] "RemoveContainer" containerID="71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679" Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.972913 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679\": container with ID starting with 71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679 not found: ID does not exist" containerID="71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.972947 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679"} err="failed to get container status \"71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679\": rpc error: code = NotFound desc = could not find container \"71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679\": container with ID starting with 71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.972959 4838 scope.go:117] "RemoveContainer" containerID="1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8" Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.973193 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\": container with ID starting with 1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8 not found: ID does not exist" containerID="1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.973213 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8"} err="failed to get container status \"1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\": rpc error: code = NotFound desc = could not find container \"1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\": container with ID starting with 1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.973228 4838 scope.go:117] "RemoveContainer" containerID="b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776" Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.973384 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\": container with ID starting with b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776 not found: ID does not exist" 
containerID="b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.973404 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776"} err="failed to get container status \"b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\": rpc error: code = NotFound desc = could not find container \"b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\": container with ID starting with b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.973418 4838 scope.go:117] "RemoveContainer" containerID="5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146" Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.973856 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\": container with ID starting with 5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146 not found: ID does not exist" containerID="5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.973877 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146"} err="failed to get container status \"5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\": rpc error: code = NotFound desc = could not find container \"5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\": container with ID starting with 5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.973890 4838 scope.go:117] "RemoveContainer" containerID="87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa" Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.974063 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\": container with ID starting with 87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa not found: ID does not exist" containerID="87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.974083 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa"} err="failed to get container status \"87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\": rpc error: code = NotFound desc = could not find container \"87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\": container with ID starting with 87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.974094 4838 scope.go:117] "RemoveContainer" containerID="3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa" Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.974383 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\": container with ID starting with 3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa not found: ID does not exist" containerID="3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.974402 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa"} err="failed to get container status \"3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\": rpc error: code = NotFound desc = could not find container \"3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\": container with ID starting with 3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.974413 4838 scope.go:117] "RemoveContainer" containerID="b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6" Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.974575 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\": container with ID starting with b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6 not found: ID does not exist" containerID="b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.974589 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6"} err="failed to get container status \"b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\": rpc error: code = NotFound desc = could not find container \"b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\": container with ID starting with b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.974611 4838 scope.go:117] "RemoveContainer" containerID="669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72" Nov 28 10:09:38 crc kubenswrapper[4838]: E1128 10:09:38.974754 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\": container with ID starting with 669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72 not found: ID does not exist" containerID="669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.974772 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72"} err="failed to get container status \"669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\": rpc error: code = NotFound desc = could not find container \"669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\": container with ID starting with 669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.974782 4838 scope.go:117] "RemoveContainer" containerID="9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730" Nov 28 10:09:38 crc 
kubenswrapper[4838]: E1128 10:09:38.974948 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\": container with ID starting with 9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730 not found: ID does not exist" containerID="9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.974962 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730"} err="failed to get container status \"9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\": rpc error: code = NotFound desc = could not find container \"9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\": container with ID starting with 9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.974973 4838 scope.go:117] "RemoveContainer" containerID="7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.975122 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d"} err="failed to get container status \"7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d\": rpc error: code = NotFound desc = could not find container \"7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d\": container with ID starting with 7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.975137 4838 scope.go:117] "RemoveContainer" containerID="71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.975406 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679"} err="failed to get container status \"71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679\": rpc error: code = NotFound desc = could not find container \"71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679\": container with ID starting with 71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.975419 4838 scope.go:117] "RemoveContainer" containerID="1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.975553 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8"} err="failed to get container status \"1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\": rpc error: code = NotFound desc = could not find container \"1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\": container with ID starting with 1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.975565 4838 scope.go:117] "RemoveContainer" containerID="b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776" Nov 28 10:09:38 crc 
kubenswrapper[4838]: I1128 10:09:38.976020 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776"} err="failed to get container status \"b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\": rpc error: code = NotFound desc = could not find container \"b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\": container with ID starting with b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.976036 4838 scope.go:117] "RemoveContainer" containerID="5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.976258 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146"} err="failed to get container status \"5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\": rpc error: code = NotFound desc = could not find container \"5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\": container with ID starting with 5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.976276 4838 scope.go:117] "RemoveContainer" containerID="87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.976475 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa"} err="failed to get container status \"87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\": rpc error: code = NotFound desc = could not find container \"87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\": container with ID starting with 87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.976493 4838 scope.go:117] "RemoveContainer" containerID="3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.976873 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa"} err="failed to get container status \"3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\": rpc error: code = NotFound desc = could not find container \"3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\": container with ID starting with 3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.976893 4838 scope.go:117] "RemoveContainer" containerID="b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.977214 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6"} err="failed to get container status \"b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\": rpc error: code = NotFound desc = could not find container \"b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\": container with ID 
starting with b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.977230 4838 scope.go:117] "RemoveContainer" containerID="669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.977485 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72"} err="failed to get container status \"669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\": rpc error: code = NotFound desc = could not find container \"669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\": container with ID starting with 669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.977501 4838 scope.go:117] "RemoveContainer" containerID="9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.977751 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730"} err="failed to get container status \"9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\": rpc error: code = NotFound desc = could not find container \"9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\": container with ID starting with 9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.977768 4838 scope.go:117] "RemoveContainer" containerID="7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.977972 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d"} err="failed to get container status \"7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d\": rpc error: code = NotFound desc = could not find container \"7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d\": container with ID starting with 7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.977988 4838 scope.go:117] "RemoveContainer" containerID="71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.978137 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679"} err="failed to get container status \"71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679\": rpc error: code = NotFound desc = could not find container \"71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679\": container with ID starting with 71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.978152 4838 scope.go:117] "RemoveContainer" containerID="1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.978449 4838 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8"} err="failed to get container status \"1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\": rpc error: code = NotFound desc = could not find container \"1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\": container with ID starting with 1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.978464 4838 scope.go:117] "RemoveContainer" containerID="b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.978610 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776"} err="failed to get container status \"b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\": rpc error: code = NotFound desc = could not find container \"b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\": container with ID starting with b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.978625 4838 scope.go:117] "RemoveContainer" containerID="5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.978787 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146"} err="failed to get container status \"5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\": rpc error: code = NotFound desc = could not find container \"5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\": container with ID starting with 5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.978802 4838 scope.go:117] "RemoveContainer" containerID="87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.978944 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa"} err="failed to get container status \"87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\": rpc error: code = NotFound desc = could not find container \"87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\": container with ID starting with 87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.978961 4838 scope.go:117] "RemoveContainer" containerID="3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.979099 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa"} err="failed to get container status \"3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\": rpc error: code = NotFound desc = could not find container \"3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\": container with ID starting with 3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa not found: ID does not exist" Nov 
28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.979114 4838 scope.go:117] "RemoveContainer" containerID="b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.979251 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6"} err="failed to get container status \"b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\": rpc error: code = NotFound desc = could not find container \"b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\": container with ID starting with b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.979267 4838 scope.go:117] "RemoveContainer" containerID="669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.979408 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72"} err="failed to get container status \"669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\": rpc error: code = NotFound desc = could not find container \"669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\": container with ID starting with 669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.979427 4838 scope.go:117] "RemoveContainer" containerID="9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.979568 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730"} err="failed to get container status \"9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\": rpc error: code = NotFound desc = could not find container \"9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\": container with ID starting with 9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.979583 4838 scope.go:117] "RemoveContainer" containerID="7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.979945 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d"} err="failed to get container status \"7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d\": rpc error: code = NotFound desc = could not find container \"7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d\": container with ID starting with 7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.979958 4838 scope.go:117] "RemoveContainer" containerID="71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.980344 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679"} err="failed to get container status 
\"71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679\": rpc error: code = NotFound desc = could not find container \"71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679\": container with ID starting with 71617073a4d76318049cb634cb4ee2135cd2c25bc3d3d6285eb69baf1fae9679 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.980397 4838 scope.go:117] "RemoveContainer" containerID="1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.980952 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8"} err="failed to get container status \"1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\": rpc error: code = NotFound desc = could not find container \"1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8\": container with ID starting with 1ae634151347de87041ca338bcea3a34ee0c2330a3c6630a3e342f62beba0ab8 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.980972 4838 scope.go:117] "RemoveContainer" containerID="b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.981248 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776"} err="failed to get container status \"b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\": rpc error: code = NotFound desc = could not find container \"b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776\": container with ID starting with b1f5c4da04a97ccc1851e901f9a13763e45ff8c7f6aab25bcbb934541ae4a776 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.981265 4838 scope.go:117] "RemoveContainer" containerID="5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.981416 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146"} err="failed to get container status \"5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\": rpc error: code = NotFound desc = could not find container \"5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146\": container with ID starting with 5781261b70d6722e8d28b91441c4241e1b48e6ff71ae3ec8973bde50180fc146 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.981433 4838 scope.go:117] "RemoveContainer" containerID="87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.981776 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa"} err="failed to get container status \"87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\": rpc error: code = NotFound desc = could not find container \"87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa\": container with ID starting with 87fcac052cf598fe1999137913f0b5ab8c999dad7d8fb512c3b079fd3b49bdaa not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.981793 4838 scope.go:117] "RemoveContainer" 
containerID="3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.981947 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa"} err="failed to get container status \"3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\": rpc error: code = NotFound desc = could not find container \"3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa\": container with ID starting with 3ccbb0df20c7e22308632a5a2d8837d77579134973f3888355be0fe46b4e59aa not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.981962 4838 scope.go:117] "RemoveContainer" containerID="b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.982090 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6"} err="failed to get container status \"b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\": rpc error: code = NotFound desc = could not find container \"b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6\": container with ID starting with b3a5c0b96cef205139d500d85f27d2c9230ab23a84ffc78f2587a465a7ff25e6 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.982106 4838 scope.go:117] "RemoveContainer" containerID="669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.982368 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72"} err="failed to get container status \"669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\": rpc error: code = NotFound desc = could not find container \"669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72\": container with ID starting with 669de13027827632f353811e082cce7cca326651a9bf4820e66504bb59067d72 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.982386 4838 scope.go:117] "RemoveContainer" containerID="9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.982564 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730"} err="failed to get container status \"9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\": rpc error: code = NotFound desc = could not find container \"9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730\": container with ID starting with 9efead1ac3ae101f3a81e0c5568645b6ac107dc126ff57bc55745b86ffacb730 not found: ID does not exist" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.982580 4838 scope.go:117] "RemoveContainer" containerID="7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d" Nov 28 10:09:38 crc kubenswrapper[4838]: I1128 10:09:38.982961 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d"} err="failed to get container status \"7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d\": rpc error: code = NotFound desc = could not find 
container \"7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d\": container with ID starting with 7fe59cc9e131d69ec0f3655ef34ceba4f4ba7f369c3da8ae4470fe09eb16c16d not found: ID does not exist" Nov 28 10:09:39 crc kubenswrapper[4838]: I1128 10:09:39.814187 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5" event={"ID":"8d4958ad-5878-40f9-8259-e68a1525b59b","Type":"ContainerStarted","Data":"f76b89dac4608b6fc1daff69100c86480755ac02ebaaa835c736a9b14863d120"} Nov 28 10:09:39 crc kubenswrapper[4838]: I1128 10:09:39.814604 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5" event={"ID":"8d4958ad-5878-40f9-8259-e68a1525b59b","Type":"ContainerStarted","Data":"b4191c6ce7a428e2df6f520e9d269d768437534a96ce53506236e0d37f13cd0d"} Nov 28 10:09:39 crc kubenswrapper[4838]: I1128 10:09:39.814626 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5" event={"ID":"8d4958ad-5878-40f9-8259-e68a1525b59b","Type":"ContainerStarted","Data":"26f8e60d88a53b823bc61f124e2d34fe3511bb47788b2101ac5907e592c06252"} Nov 28 10:09:39 crc kubenswrapper[4838]: I1128 10:09:39.814643 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5" event={"ID":"8d4958ad-5878-40f9-8259-e68a1525b59b","Type":"ContainerStarted","Data":"d179662b3f078702f2d4307cce648b4d2e0bbe2a0e50d816e69e18e721af6b8d"} Nov 28 10:09:39 crc kubenswrapper[4838]: I1128 10:09:39.814659 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5" event={"ID":"8d4958ad-5878-40f9-8259-e68a1525b59b","Type":"ContainerStarted","Data":"b34d07e5e1cecf5efac35f1a2d55e0f06f4dc0e7c1e905930af5c77fe022881a"} Nov 28 10:09:40 crc kubenswrapper[4838]: I1128 10:09:40.573952 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41b01f7d-5c75-49de-86f7-87e04bf71194" path="/var/lib/kubelet/pods/41b01f7d-5c75-49de-86f7-87e04bf71194/volumes" Nov 28 10:09:40 crc kubenswrapper[4838]: I1128 10:09:40.826552 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5" event={"ID":"8d4958ad-5878-40f9-8259-e68a1525b59b","Type":"ContainerStarted","Data":"94899dba360059987b1116a1c0cd341afa06adf32f77ac8277c1531eec724dda"} Nov 28 10:09:42 crc kubenswrapper[4838]: I1128 10:09:42.850213 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5" event={"ID":"8d4958ad-5878-40f9-8259-e68a1525b59b","Type":"ContainerStarted","Data":"b1c9662eea2bcfa068313a915d53fa8765893cd403a84c4f79c1f18f142aa737"} Nov 28 10:09:44 crc kubenswrapper[4838]: I1128 10:09:44.866162 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5" event={"ID":"8d4958ad-5878-40f9-8259-e68a1525b59b","Type":"ContainerStarted","Data":"9abd27df2c29b4df5e89382863bbb985bbd5f574574f397bf726e82430afd2ca"} Nov 28 10:09:44 crc kubenswrapper[4838]: I1128 10:09:44.866517 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5" Nov 28 10:09:44 crc kubenswrapper[4838]: I1128 10:09:44.866533 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5" Nov 28 10:09:44 crc kubenswrapper[4838]: I1128 10:09:44.866544 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5" Nov 28 10:09:44 crc kubenswrapper[4838]: I1128 10:09:44.891203 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5" Nov 28 10:09:44 crc kubenswrapper[4838]: I1128 10:09:44.891848 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5" Nov 28 10:09:44 crc kubenswrapper[4838]: I1128 10:09:44.896228 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5" podStartSLOduration=6.89621102 podStartE2EDuration="6.89621102s" podCreationTimestamp="2025-11-28 10:09:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:09:44.89282917 +0000 UTC m=+756.591803340" watchObservedRunningTime="2025-11-28 10:09:44.89621102 +0000 UTC m=+756.595185190" Nov 28 10:09:49 crc kubenswrapper[4838]: I1128 10:09:49.562816 4838 scope.go:117] "RemoveContainer" containerID="9c246d0893eb62f4c097d470f8716546d2e91da8aed1554ef64709537d7d377d" Nov 28 10:09:49 crc kubenswrapper[4838]: E1128 10:09:49.564834 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-4rv9b_openshift-multus(051f7e1c-2d47-4be9-bbd5-14feec16eb16)\"" pod="openshift-multus/multus-4rv9b" podUID="051f7e1c-2d47-4be9-bbd5-14feec16eb16" Nov 28 10:10:03 crc kubenswrapper[4838]: I1128 10:10:03.562412 4838 scope.go:117] "RemoveContainer" containerID="9c246d0893eb62f4c097d470f8716546d2e91da8aed1554ef64709537d7d377d" Nov 28 10:10:03 crc kubenswrapper[4838]: I1128 10:10:03.993776 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4rv9b_051f7e1c-2d47-4be9-bbd5-14feec16eb16/kube-multus/2.log" Nov 28 10:10:03 crc kubenswrapper[4838]: I1128 10:10:03.994518 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4rv9b_051f7e1c-2d47-4be9-bbd5-14feec16eb16/kube-multus/1.log" Nov 28 10:10:03 crc kubenswrapper[4838]: I1128 10:10:03.994565 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4rv9b" event={"ID":"051f7e1c-2d47-4be9-bbd5-14feec16eb16","Type":"ContainerStarted","Data":"170dca5c10fa4cef83b03e4fab218b9107e783134e40b526bd42e2ca553587e4"} Nov 28 10:10:08 crc kubenswrapper[4838]: I1128 10:10:08.588195 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-gsvx5" Nov 28 10:10:09 crc kubenswrapper[4838]: I1128 10:10:09.113561 4838 scope.go:117] "RemoveContainer" containerID="262d73384f8aa0c5e8405e70d091fbc0003217ee2c08a4776048649b9a6eda59" Nov 28 10:10:10 crc kubenswrapper[4838]: I1128 10:10:10.034654 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4rv9b_051f7e1c-2d47-4be9-bbd5-14feec16eb16/kube-multus/2.log" Nov 28 10:10:18 crc kubenswrapper[4838]: I1128 10:10:18.515760 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql"] Nov 28 10:10:18 crc kubenswrapper[4838]: I1128 10:10:18.517895 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql" Nov 28 10:10:18 crc kubenswrapper[4838]: I1128 10:10:18.523126 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql"] Nov 28 10:10:18 crc kubenswrapper[4838]: I1128 10:10:18.526259 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 28 10:10:18 crc kubenswrapper[4838]: I1128 10:10:18.610686 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/607ae4a0-90c8-48ff-afa9-21eb5b545fce-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql\" (UID: \"607ae4a0-90c8-48ff-afa9-21eb5b545fce\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql" Nov 28 10:10:18 crc kubenswrapper[4838]: I1128 10:10:18.610906 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/607ae4a0-90c8-48ff-afa9-21eb5b545fce-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql\" (UID: \"607ae4a0-90c8-48ff-afa9-21eb5b545fce\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql" Nov 28 10:10:18 crc kubenswrapper[4838]: I1128 10:10:18.611006 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzczk\" (UniqueName: \"kubernetes.io/projected/607ae4a0-90c8-48ff-afa9-21eb5b545fce-kube-api-access-wzczk\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql\" (UID: \"607ae4a0-90c8-48ff-afa9-21eb5b545fce\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql" Nov 28 10:10:18 crc kubenswrapper[4838]: I1128 10:10:18.711844 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/607ae4a0-90c8-48ff-afa9-21eb5b545fce-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql\" (UID: \"607ae4a0-90c8-48ff-afa9-21eb5b545fce\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql" Nov 28 10:10:18 crc kubenswrapper[4838]: I1128 10:10:18.711928 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/607ae4a0-90c8-48ff-afa9-21eb5b545fce-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql\" (UID: \"607ae4a0-90c8-48ff-afa9-21eb5b545fce\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql" Nov 28 10:10:18 crc kubenswrapper[4838]: I1128 10:10:18.711966 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzczk\" (UniqueName: \"kubernetes.io/projected/607ae4a0-90c8-48ff-afa9-21eb5b545fce-kube-api-access-wzczk\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql\" (UID: \"607ae4a0-90c8-48ff-afa9-21eb5b545fce\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql" Nov 28 10:10:18 crc kubenswrapper[4838]: I1128 10:10:18.712384 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/607ae4a0-90c8-48ff-afa9-21eb5b545fce-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql\" (UID: \"607ae4a0-90c8-48ff-afa9-21eb5b545fce\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql" Nov 28 10:10:18 crc kubenswrapper[4838]: I1128 10:10:18.712383 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/607ae4a0-90c8-48ff-afa9-21eb5b545fce-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql\" (UID: \"607ae4a0-90c8-48ff-afa9-21eb5b545fce\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql" Nov 28 10:10:18 crc kubenswrapper[4838]: I1128 10:10:18.736959 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzczk\" (UniqueName: \"kubernetes.io/projected/607ae4a0-90c8-48ff-afa9-21eb5b545fce-kube-api-access-wzczk\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql\" (UID: \"607ae4a0-90c8-48ff-afa9-21eb5b545fce\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql" Nov 28 10:10:18 crc kubenswrapper[4838]: I1128 10:10:18.839471 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql" Nov 28 10:10:19 crc kubenswrapper[4838]: I1128 10:10:19.273395 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql"] Nov 28 10:10:19 crc kubenswrapper[4838]: I1128 10:10:19.820758 4838 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 28 10:10:20 crc kubenswrapper[4838]: I1128 10:10:20.092735 4838 generic.go:334] "Generic (PLEG): container finished" podID="607ae4a0-90c8-48ff-afa9-21eb5b545fce" containerID="00e87c3c75929381b3ed6ee440ca033b461fee6b4840e274da32717bb2c77771" exitCode=0 Nov 28 10:10:20 crc kubenswrapper[4838]: I1128 10:10:20.092818 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql" event={"ID":"607ae4a0-90c8-48ff-afa9-21eb5b545fce","Type":"ContainerDied","Data":"00e87c3c75929381b3ed6ee440ca033b461fee6b4840e274da32717bb2c77771"} Nov 28 10:10:20 crc kubenswrapper[4838]: I1128 10:10:20.092900 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql" event={"ID":"607ae4a0-90c8-48ff-afa9-21eb5b545fce","Type":"ContainerStarted","Data":"6f8fdd8b4df92f4bf58945bc2c35c136393d58afc77131f6055861c715ec4c76"} Nov 28 10:10:20 crc kubenswrapper[4838]: I1128 10:10:20.803554 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6gmrz"] Nov 28 10:10:20 crc kubenswrapper[4838]: I1128 10:10:20.809834 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6gmrz" Nov 28 10:10:20 crc kubenswrapper[4838]: I1128 10:10:20.822462 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6gmrz"] Nov 28 10:10:20 crc kubenswrapper[4838]: I1128 10:10:20.845270 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a381223-ecef-429b-8233-90b9084c3f23-catalog-content\") pod \"redhat-operators-6gmrz\" (UID: \"7a381223-ecef-429b-8233-90b9084c3f23\") " pod="openshift-marketplace/redhat-operators-6gmrz" Nov 28 10:10:20 crc kubenswrapper[4838]: I1128 10:10:20.845484 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a381223-ecef-429b-8233-90b9084c3f23-utilities\") pod \"redhat-operators-6gmrz\" (UID: \"7a381223-ecef-429b-8233-90b9084c3f23\") " pod="openshift-marketplace/redhat-operators-6gmrz" Nov 28 10:10:20 crc kubenswrapper[4838]: I1128 10:10:20.845554 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8m6w\" (UniqueName: \"kubernetes.io/projected/7a381223-ecef-429b-8233-90b9084c3f23-kube-api-access-c8m6w\") pod \"redhat-operators-6gmrz\" (UID: \"7a381223-ecef-429b-8233-90b9084c3f23\") " pod="openshift-marketplace/redhat-operators-6gmrz" Nov 28 10:10:20 crc kubenswrapper[4838]: I1128 10:10:20.946456 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a381223-ecef-429b-8233-90b9084c3f23-utilities\") pod \"redhat-operators-6gmrz\" (UID: \"7a381223-ecef-429b-8233-90b9084c3f23\") " pod="openshift-marketplace/redhat-operators-6gmrz" Nov 28 10:10:20 crc kubenswrapper[4838]: I1128 10:10:20.946518 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8m6w\" (UniqueName: \"kubernetes.io/projected/7a381223-ecef-429b-8233-90b9084c3f23-kube-api-access-c8m6w\") pod \"redhat-operators-6gmrz\" (UID: \"7a381223-ecef-429b-8233-90b9084c3f23\") " pod="openshift-marketplace/redhat-operators-6gmrz" Nov 28 10:10:20 crc kubenswrapper[4838]: I1128 10:10:20.946568 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a381223-ecef-429b-8233-90b9084c3f23-catalog-content\") pod \"redhat-operators-6gmrz\" (UID: \"7a381223-ecef-429b-8233-90b9084c3f23\") " pod="openshift-marketplace/redhat-operators-6gmrz" Nov 28 10:10:20 crc kubenswrapper[4838]: I1128 10:10:20.947030 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a381223-ecef-429b-8233-90b9084c3f23-utilities\") pod \"redhat-operators-6gmrz\" (UID: \"7a381223-ecef-429b-8233-90b9084c3f23\") " pod="openshift-marketplace/redhat-operators-6gmrz" Nov 28 10:10:20 crc kubenswrapper[4838]: I1128 10:10:20.947118 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a381223-ecef-429b-8233-90b9084c3f23-catalog-content\") pod \"redhat-operators-6gmrz\" (UID: \"7a381223-ecef-429b-8233-90b9084c3f23\") " pod="openshift-marketplace/redhat-operators-6gmrz" Nov 28 10:10:20 crc kubenswrapper[4838]: I1128 10:10:20.967511 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-c8m6w\" (UniqueName: \"kubernetes.io/projected/7a381223-ecef-429b-8233-90b9084c3f23-kube-api-access-c8m6w\") pod \"redhat-operators-6gmrz\" (UID: \"7a381223-ecef-429b-8233-90b9084c3f23\") " pod="openshift-marketplace/redhat-operators-6gmrz" Nov 28 10:10:21 crc kubenswrapper[4838]: I1128 10:10:21.177789 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6gmrz" Nov 28 10:10:21 crc kubenswrapper[4838]: I1128 10:10:21.370520 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6gmrz"] Nov 28 10:10:21 crc kubenswrapper[4838]: W1128 10:10:21.375938 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7a381223_ecef_429b_8233_90b9084c3f23.slice/crio-b75eba6885f95366064884aac2aa331d03003c875bba41a0f23deaf12f462e5c WatchSource:0}: Error finding container b75eba6885f95366064884aac2aa331d03003c875bba41a0f23deaf12f462e5c: Status 404 returned error can't find the container with id b75eba6885f95366064884aac2aa331d03003c875bba41a0f23deaf12f462e5c Nov 28 10:10:22 crc kubenswrapper[4838]: I1128 10:10:22.107190 4838 generic.go:334] "Generic (PLEG): container finished" podID="7a381223-ecef-429b-8233-90b9084c3f23" containerID="d3639154f25965c1974700c45e3ec6ae5ef2f736e635a915eff97c9d097e74c5" exitCode=0 Nov 28 10:10:22 crc kubenswrapper[4838]: I1128 10:10:22.107300 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gmrz" event={"ID":"7a381223-ecef-429b-8233-90b9084c3f23","Type":"ContainerDied","Data":"d3639154f25965c1974700c45e3ec6ae5ef2f736e635a915eff97c9d097e74c5"} Nov 28 10:10:22 crc kubenswrapper[4838]: I1128 10:10:22.107587 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gmrz" event={"ID":"7a381223-ecef-429b-8233-90b9084c3f23","Type":"ContainerStarted","Data":"b75eba6885f95366064884aac2aa331d03003c875bba41a0f23deaf12f462e5c"} Nov 28 10:10:23 crc kubenswrapper[4838]: I1128 10:10:23.115100 4838 generic.go:334] "Generic (PLEG): container finished" podID="607ae4a0-90c8-48ff-afa9-21eb5b545fce" containerID="e5c031c504b93a9e22f8af5693b2be1834f3dd36baa1635f23d979e7ad5805a3" exitCode=0 Nov 28 10:10:23 crc kubenswrapper[4838]: I1128 10:10:23.115458 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql" event={"ID":"607ae4a0-90c8-48ff-afa9-21eb5b545fce","Type":"ContainerDied","Data":"e5c031c504b93a9e22f8af5693b2be1834f3dd36baa1635f23d979e7ad5805a3"} Nov 28 10:10:23 crc kubenswrapper[4838]: I1128 10:10:23.119408 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gmrz" event={"ID":"7a381223-ecef-429b-8233-90b9084c3f23","Type":"ContainerStarted","Data":"0f3c31ff24d803abe584b746fbb2bc0f584dbd8fee443cdf1a0c035afc565ff8"} Nov 28 10:10:24 crc kubenswrapper[4838]: I1128 10:10:24.127789 4838 generic.go:334] "Generic (PLEG): container finished" podID="7a381223-ecef-429b-8233-90b9084c3f23" containerID="0f3c31ff24d803abe584b746fbb2bc0f584dbd8fee443cdf1a0c035afc565ff8" exitCode=0 Nov 28 10:10:24 crc kubenswrapper[4838]: I1128 10:10:24.127884 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gmrz" 
event={"ID":"7a381223-ecef-429b-8233-90b9084c3f23","Type":"ContainerDied","Data":"0f3c31ff24d803abe584b746fbb2bc0f584dbd8fee443cdf1a0c035afc565ff8"} Nov 28 10:10:24 crc kubenswrapper[4838]: I1128 10:10:24.132485 4838 generic.go:334] "Generic (PLEG): container finished" podID="607ae4a0-90c8-48ff-afa9-21eb5b545fce" containerID="382261d66a28715e97da004d0930458be5c8288815ab38ddff223b1012a8d5e3" exitCode=0 Nov 28 10:10:24 crc kubenswrapper[4838]: I1128 10:10:24.132534 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql" event={"ID":"607ae4a0-90c8-48ff-afa9-21eb5b545fce","Type":"ContainerDied","Data":"382261d66a28715e97da004d0930458be5c8288815ab38ddff223b1012a8d5e3"} Nov 28 10:10:25 crc kubenswrapper[4838]: I1128 10:10:25.457024 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql" Nov 28 10:10:25 crc kubenswrapper[4838]: I1128 10:10:25.607687 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wzczk\" (UniqueName: \"kubernetes.io/projected/607ae4a0-90c8-48ff-afa9-21eb5b545fce-kube-api-access-wzczk\") pod \"607ae4a0-90c8-48ff-afa9-21eb5b545fce\" (UID: \"607ae4a0-90c8-48ff-afa9-21eb5b545fce\") " Nov 28 10:10:25 crc kubenswrapper[4838]: I1128 10:10:25.607874 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/607ae4a0-90c8-48ff-afa9-21eb5b545fce-bundle\") pod \"607ae4a0-90c8-48ff-afa9-21eb5b545fce\" (UID: \"607ae4a0-90c8-48ff-afa9-21eb5b545fce\") " Nov 28 10:10:25 crc kubenswrapper[4838]: I1128 10:10:25.607908 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/607ae4a0-90c8-48ff-afa9-21eb5b545fce-util\") pod \"607ae4a0-90c8-48ff-afa9-21eb5b545fce\" (UID: \"607ae4a0-90c8-48ff-afa9-21eb5b545fce\") " Nov 28 10:10:25 crc kubenswrapper[4838]: I1128 10:10:25.608914 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/607ae4a0-90c8-48ff-afa9-21eb5b545fce-bundle" (OuterVolumeSpecName: "bundle") pod "607ae4a0-90c8-48ff-afa9-21eb5b545fce" (UID: "607ae4a0-90c8-48ff-afa9-21eb5b545fce"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:10:25 crc kubenswrapper[4838]: I1128 10:10:25.616705 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/607ae4a0-90c8-48ff-afa9-21eb5b545fce-kube-api-access-wzczk" (OuterVolumeSpecName: "kube-api-access-wzczk") pod "607ae4a0-90c8-48ff-afa9-21eb5b545fce" (UID: "607ae4a0-90c8-48ff-afa9-21eb5b545fce"). InnerVolumeSpecName "kube-api-access-wzczk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:10:25 crc kubenswrapper[4838]: I1128 10:10:25.622614 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/607ae4a0-90c8-48ff-afa9-21eb5b545fce-util" (OuterVolumeSpecName: "util") pod "607ae4a0-90c8-48ff-afa9-21eb5b545fce" (UID: "607ae4a0-90c8-48ff-afa9-21eb5b545fce"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:10:25 crc kubenswrapper[4838]: I1128 10:10:25.709067 4838 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/607ae4a0-90c8-48ff-afa9-21eb5b545fce-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:10:25 crc kubenswrapper[4838]: I1128 10:10:25.709111 4838 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/607ae4a0-90c8-48ff-afa9-21eb5b545fce-util\") on node \"crc\" DevicePath \"\"" Nov 28 10:10:25 crc kubenswrapper[4838]: I1128 10:10:25.709120 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wzczk\" (UniqueName: \"kubernetes.io/projected/607ae4a0-90c8-48ff-afa9-21eb5b545fce-kube-api-access-wzczk\") on node \"crc\" DevicePath \"\"" Nov 28 10:10:26 crc kubenswrapper[4838]: I1128 10:10:26.157881 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql" event={"ID":"607ae4a0-90c8-48ff-afa9-21eb5b545fce","Type":"ContainerDied","Data":"6f8fdd8b4df92f4bf58945bc2c35c136393d58afc77131f6055861c715ec4c76"} Nov 28 10:10:26 crc kubenswrapper[4838]: I1128 10:10:26.157962 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6f8fdd8b4df92f4bf58945bc2c35c136393d58afc77131f6055861c715ec4c76" Nov 28 10:10:26 crc kubenswrapper[4838]: I1128 10:10:26.158136 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql" Nov 28 10:10:26 crc kubenswrapper[4838]: I1128 10:10:26.166092 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gmrz" event={"ID":"7a381223-ecef-429b-8233-90b9084c3f23","Type":"ContainerStarted","Data":"b4a3d5689e23be999f492e12de79ff26cfdeeb51a5f64829cf25643b38b69923"} Nov 28 10:10:26 crc kubenswrapper[4838]: I1128 10:10:26.211101 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6gmrz" podStartSLOduration=3.607549851 podStartE2EDuration="6.211083335s" podCreationTimestamp="2025-11-28 10:10:20 +0000 UTC" firstStartedPulling="2025-11-28 10:10:22.109041609 +0000 UTC m=+793.808015819" lastFinishedPulling="2025-11-28 10:10:24.712575093 +0000 UTC m=+796.411549303" observedRunningTime="2025-11-28 10:10:26.204430085 +0000 UTC m=+797.903404335" watchObservedRunningTime="2025-11-28 10:10:26.211083335 +0000 UTC m=+797.910057515" Nov 28 10:10:29 crc kubenswrapper[4838]: I1128 10:10:29.853269 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-9n28j"] Nov 28 10:10:29 crc kubenswrapper[4838]: E1128 10:10:29.854265 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="607ae4a0-90c8-48ff-afa9-21eb5b545fce" containerName="pull" Nov 28 10:10:29 crc kubenswrapper[4838]: I1128 10:10:29.854284 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="607ae4a0-90c8-48ff-afa9-21eb5b545fce" containerName="pull" Nov 28 10:10:29 crc kubenswrapper[4838]: E1128 10:10:29.854295 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="607ae4a0-90c8-48ff-afa9-21eb5b545fce" containerName="util" Nov 28 10:10:29 crc kubenswrapper[4838]: I1128 10:10:29.854302 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="607ae4a0-90c8-48ff-afa9-21eb5b545fce" containerName="util" Nov 28 10:10:29 crc 
kubenswrapper[4838]: E1128 10:10:29.854316 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="607ae4a0-90c8-48ff-afa9-21eb5b545fce" containerName="extract" Nov 28 10:10:29 crc kubenswrapper[4838]: I1128 10:10:29.854324 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="607ae4a0-90c8-48ff-afa9-21eb5b545fce" containerName="extract" Nov 28 10:10:29 crc kubenswrapper[4838]: I1128 10:10:29.854442 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="607ae4a0-90c8-48ff-afa9-21eb5b545fce" containerName="extract" Nov 28 10:10:29 crc kubenswrapper[4838]: I1128 10:10:29.854954 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-9n28j" Nov 28 10:10:29 crc kubenswrapper[4838]: I1128 10:10:29.857524 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 28 10:10:29 crc kubenswrapper[4838]: I1128 10:10:29.857550 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-m7vmt" Nov 28 10:10:29 crc kubenswrapper[4838]: I1128 10:10:29.857702 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 28 10:10:29 crc kubenswrapper[4838]: I1128 10:10:29.863376 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-9n28j"] Nov 28 10:10:29 crc kubenswrapper[4838]: I1128 10:10:29.965579 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fcrw4\" (UniqueName: \"kubernetes.io/projected/8726f2de-449f-4a3d-ae20-cf2e1f14abe2-kube-api-access-fcrw4\") pod \"nmstate-operator-5b5b58f5c8-9n28j\" (UID: \"8726f2de-449f-4a3d-ae20-cf2e1f14abe2\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-9n28j" Nov 28 10:10:30 crc kubenswrapper[4838]: I1128 10:10:30.066460 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fcrw4\" (UniqueName: \"kubernetes.io/projected/8726f2de-449f-4a3d-ae20-cf2e1f14abe2-kube-api-access-fcrw4\") pod \"nmstate-operator-5b5b58f5c8-9n28j\" (UID: \"8726f2de-449f-4a3d-ae20-cf2e1f14abe2\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-9n28j" Nov 28 10:10:30 crc kubenswrapper[4838]: I1128 10:10:30.086866 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fcrw4\" (UniqueName: \"kubernetes.io/projected/8726f2de-449f-4a3d-ae20-cf2e1f14abe2-kube-api-access-fcrw4\") pod \"nmstate-operator-5b5b58f5c8-9n28j\" (UID: \"8726f2de-449f-4a3d-ae20-cf2e1f14abe2\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-9n28j" Nov 28 10:10:30 crc kubenswrapper[4838]: I1128 10:10:30.170702 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-9n28j" Nov 28 10:10:30 crc kubenswrapper[4838]: I1128 10:10:30.610225 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-9n28j"] Nov 28 10:10:31 crc kubenswrapper[4838]: I1128 10:10:31.178104 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6gmrz" Nov 28 10:10:31 crc kubenswrapper[4838]: I1128 10:10:31.178151 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6gmrz" Nov 28 10:10:31 crc kubenswrapper[4838]: I1128 10:10:31.210510 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-9n28j" event={"ID":"8726f2de-449f-4a3d-ae20-cf2e1f14abe2","Type":"ContainerStarted","Data":"8b9081a7b098bfd3244ae4d947b2795345cd1f21ebc8ce03aefe6f6d6a383e7d"} Nov 28 10:10:32 crc kubenswrapper[4838]: I1128 10:10:32.241970 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6gmrz" podUID="7a381223-ecef-429b-8233-90b9084c3f23" containerName="registry-server" probeResult="failure" output=< Nov 28 10:10:32 crc kubenswrapper[4838]: timeout: failed to connect service ":50051" within 1s Nov 28 10:10:32 crc kubenswrapper[4838]: > Nov 28 10:10:33 crc kubenswrapper[4838]: I1128 10:10:33.225275 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-9n28j" event={"ID":"8726f2de-449f-4a3d-ae20-cf2e1f14abe2","Type":"ContainerStarted","Data":"20e09a745a230882f89f4b636fb01ef7e968ebc623d987fcbe3164efcadc1700"} Nov 28 10:10:33 crc kubenswrapper[4838]: I1128 10:10:33.246849 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-9n28j" podStartSLOduration=2.267784487 podStartE2EDuration="4.246816414s" podCreationTimestamp="2025-11-28 10:10:29 +0000 UTC" firstStartedPulling="2025-11-28 10:10:30.627006969 +0000 UTC m=+802.325981139" lastFinishedPulling="2025-11-28 10:10:32.606038896 +0000 UTC m=+804.305013066" observedRunningTime="2025-11-28 10:10:33.240499343 +0000 UTC m=+804.939473513" watchObservedRunningTime="2025-11-28 10:10:33.246816414 +0000 UTC m=+804.945790624" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.031774 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-hrdrc"] Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.033136 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-hrdrc" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.035409 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-8ggn5" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.039560 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-hrdrc"] Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.083435 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pf4kk"] Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.084089 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pf4kk" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.085832 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.104080 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-sgkvp"] Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.104764 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-sgkvp" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.112360 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pf4kk"] Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.160925 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qvrkm"] Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.161922 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qvrkm" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.164192 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-zrsls" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.164431 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.164465 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.175784 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qvrkm"] Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.192107 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7x8r9\" (UniqueName: \"kubernetes.io/projected/1190df35-2195-49c1-abb5-1a5e11626ec4-kube-api-access-7x8r9\") pod \"nmstate-metrics-7f946cbc9-hrdrc\" (UID: \"1190df35-2195-49c1-abb5-1a5e11626ec4\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-hrdrc" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.192428 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/27f91860-f5ee-4232-b298-bf97137a1d12-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-pf4kk\" (UID: \"27f91860-f5ee-4232-b298-bf97137a1d12\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pf4kk" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.192482 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sz5vf\" (UniqueName: \"kubernetes.io/projected/27f91860-f5ee-4232-b298-bf97137a1d12-kube-api-access-sz5vf\") pod \"nmstate-webhook-5f6d4c5ccb-pf4kk\" (UID: \"27f91860-f5ee-4232-b298-bf97137a1d12\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pf4kk" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.293414 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vxjp\" (UniqueName: \"kubernetes.io/projected/7d076e50-6cc5-4258-b334-faa9d4f1a3b4-kube-api-access-6vxjp\") pod \"nmstate-handler-sgkvp\" (UID: \"7d076e50-6cc5-4258-b334-faa9d4f1a3b4\") " 
pod="openshift-nmstate/nmstate-handler-sgkvp" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.293475 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/7d076e50-6cc5-4258-b334-faa9d4f1a3b4-ovs-socket\") pod \"nmstate-handler-sgkvp\" (UID: \"7d076e50-6cc5-4258-b334-faa9d4f1a3b4\") " pod="openshift-nmstate/nmstate-handler-sgkvp" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.293516 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/7d076e50-6cc5-4258-b334-faa9d4f1a3b4-dbus-socket\") pod \"nmstate-handler-sgkvp\" (UID: \"7d076e50-6cc5-4258-b334-faa9d4f1a3b4\") " pod="openshift-nmstate/nmstate-handler-sgkvp" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.293551 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-98x4j\" (UniqueName: \"kubernetes.io/projected/2205c062-150d-43c9-8e91-9187c92a1908-kube-api-access-98x4j\") pod \"nmstate-console-plugin-7fbb5f6569-qvrkm\" (UID: \"2205c062-150d-43c9-8e91-9187c92a1908\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qvrkm" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.293609 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7x8r9\" (UniqueName: \"kubernetes.io/projected/1190df35-2195-49c1-abb5-1a5e11626ec4-kube-api-access-7x8r9\") pod \"nmstate-metrics-7f946cbc9-hrdrc\" (UID: \"1190df35-2195-49c1-abb5-1a5e11626ec4\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-hrdrc" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.293646 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/27f91860-f5ee-4232-b298-bf97137a1d12-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-pf4kk\" (UID: \"27f91860-f5ee-4232-b298-bf97137a1d12\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pf4kk" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.293708 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/2205c062-150d-43c9-8e91-9187c92a1908-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-qvrkm\" (UID: \"2205c062-150d-43c9-8e91-9187c92a1908\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qvrkm" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.293765 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/7d076e50-6cc5-4258-b334-faa9d4f1a3b4-nmstate-lock\") pod \"nmstate-handler-sgkvp\" (UID: \"7d076e50-6cc5-4258-b334-faa9d4f1a3b4\") " pod="openshift-nmstate/nmstate-handler-sgkvp" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.293801 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/2205c062-150d-43c9-8e91-9187c92a1908-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-qvrkm\" (UID: \"2205c062-150d-43c9-8e91-9187c92a1908\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qvrkm" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.293829 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sz5vf\" 
(UniqueName: \"kubernetes.io/projected/27f91860-f5ee-4232-b298-bf97137a1d12-kube-api-access-sz5vf\") pod \"nmstate-webhook-5f6d4c5ccb-pf4kk\" (UID: \"27f91860-f5ee-4232-b298-bf97137a1d12\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pf4kk" Nov 28 10:10:39 crc kubenswrapper[4838]: E1128 10:10:39.294070 4838 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 28 10:10:39 crc kubenswrapper[4838]: E1128 10:10:39.294212 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/27f91860-f5ee-4232-b298-bf97137a1d12-tls-key-pair podName:27f91860-f5ee-4232-b298-bf97137a1d12 nodeName:}" failed. No retries permitted until 2025-11-28 10:10:39.794192466 +0000 UTC m=+811.493166646 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/27f91860-f5ee-4232-b298-bf97137a1d12-tls-key-pair") pod "nmstate-webhook-5f6d4c5ccb-pf4kk" (UID: "27f91860-f5ee-4232-b298-bf97137a1d12") : secret "openshift-nmstate-webhook" not found Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.313579 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7x8r9\" (UniqueName: \"kubernetes.io/projected/1190df35-2195-49c1-abb5-1a5e11626ec4-kube-api-access-7x8r9\") pod \"nmstate-metrics-7f946cbc9-hrdrc\" (UID: \"1190df35-2195-49c1-abb5-1a5e11626ec4\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-hrdrc" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.323795 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sz5vf\" (UniqueName: \"kubernetes.io/projected/27f91860-f5ee-4232-b298-bf97137a1d12-kube-api-access-sz5vf\") pod \"nmstate-webhook-5f6d4c5ccb-pf4kk\" (UID: \"27f91860-f5ee-4232-b298-bf97137a1d12\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pf4kk" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.394850 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/2205c062-150d-43c9-8e91-9187c92a1908-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-qvrkm\" (UID: \"2205c062-150d-43c9-8e91-9187c92a1908\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qvrkm" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.394902 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/7d076e50-6cc5-4258-b334-faa9d4f1a3b4-nmstate-lock\") pod \"nmstate-handler-sgkvp\" (UID: \"7d076e50-6cc5-4258-b334-faa9d4f1a3b4\") " pod="openshift-nmstate/nmstate-handler-sgkvp" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.394931 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/2205c062-150d-43c9-8e91-9187c92a1908-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-qvrkm\" (UID: \"2205c062-150d-43c9-8e91-9187c92a1908\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qvrkm" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.394995 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vxjp\" (UniqueName: \"kubernetes.io/projected/7d076e50-6cc5-4258-b334-faa9d4f1a3b4-kube-api-access-6vxjp\") pod \"nmstate-handler-sgkvp\" (UID: \"7d076e50-6cc5-4258-b334-faa9d4f1a3b4\") " pod="openshift-nmstate/nmstate-handler-sgkvp" Nov 28 10:10:39 crc 
kubenswrapper[4838]: I1128 10:10:39.395017 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/7d076e50-6cc5-4258-b334-faa9d4f1a3b4-ovs-socket\") pod \"nmstate-handler-sgkvp\" (UID: \"7d076e50-6cc5-4258-b334-faa9d4f1a3b4\") " pod="openshift-nmstate/nmstate-handler-sgkvp" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.395041 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/7d076e50-6cc5-4258-b334-faa9d4f1a3b4-dbus-socket\") pod \"nmstate-handler-sgkvp\" (UID: \"7d076e50-6cc5-4258-b334-faa9d4f1a3b4\") " pod="openshift-nmstate/nmstate-handler-sgkvp" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.395037 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/7d076e50-6cc5-4258-b334-faa9d4f1a3b4-nmstate-lock\") pod \"nmstate-handler-sgkvp\" (UID: \"7d076e50-6cc5-4258-b334-faa9d4f1a3b4\") " pod="openshift-nmstate/nmstate-handler-sgkvp" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.395090 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-98x4j\" (UniqueName: \"kubernetes.io/projected/2205c062-150d-43c9-8e91-9187c92a1908-kube-api-access-98x4j\") pod \"nmstate-console-plugin-7fbb5f6569-qvrkm\" (UID: \"2205c062-150d-43c9-8e91-9187c92a1908\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qvrkm" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.395124 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/7d076e50-6cc5-4258-b334-faa9d4f1a3b4-ovs-socket\") pod \"nmstate-handler-sgkvp\" (UID: \"7d076e50-6cc5-4258-b334-faa9d4f1a3b4\") " pod="openshift-nmstate/nmstate-handler-sgkvp" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.395689 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-hrdrc" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.395835 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/2205c062-150d-43c9-8e91-9187c92a1908-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-qvrkm\" (UID: \"2205c062-150d-43c9-8e91-9187c92a1908\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qvrkm" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.395997 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-5595cfd85-m9r2b"] Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.396095 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/7d076e50-6cc5-4258-b334-faa9d4f1a3b4-dbus-socket\") pod \"nmstate-handler-sgkvp\" (UID: \"7d076e50-6cc5-4258-b334-faa9d4f1a3b4\") " pod="openshift-nmstate/nmstate-handler-sgkvp" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.396704 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.405487 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/2205c062-150d-43c9-8e91-9187c92a1908-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-qvrkm\" (UID: \"2205c062-150d-43c9-8e91-9187c92a1908\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qvrkm" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.414445 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-98x4j\" (UniqueName: \"kubernetes.io/projected/2205c062-150d-43c9-8e91-9187c92a1908-kube-api-access-98x4j\") pod \"nmstate-console-plugin-7fbb5f6569-qvrkm\" (UID: \"2205c062-150d-43c9-8e91-9187c92a1908\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qvrkm" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.430071 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vxjp\" (UniqueName: \"kubernetes.io/projected/7d076e50-6cc5-4258-b334-faa9d4f1a3b4-kube-api-access-6vxjp\") pod \"nmstate-handler-sgkvp\" (UID: \"7d076e50-6cc5-4258-b334-faa9d4f1a3b4\") " pod="openshift-nmstate/nmstate-handler-sgkvp" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.449174 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5595cfd85-m9r2b"] Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.479340 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qvrkm" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.497497 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/21d6ff6a-e58c-43fc-bd24-de7b95588532-console-serving-cert\") pod \"console-5595cfd85-m9r2b\" (UID: \"21d6ff6a-e58c-43fc-bd24-de7b95588532\") " pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.497581 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfwlc\" (UniqueName: \"kubernetes.io/projected/21d6ff6a-e58c-43fc-bd24-de7b95588532-kube-api-access-qfwlc\") pod \"console-5595cfd85-m9r2b\" (UID: \"21d6ff6a-e58c-43fc-bd24-de7b95588532\") " pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.497601 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/21d6ff6a-e58c-43fc-bd24-de7b95588532-oauth-serving-cert\") pod \"console-5595cfd85-m9r2b\" (UID: \"21d6ff6a-e58c-43fc-bd24-de7b95588532\") " pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.497654 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/21d6ff6a-e58c-43fc-bd24-de7b95588532-service-ca\") pod \"console-5595cfd85-m9r2b\" (UID: \"21d6ff6a-e58c-43fc-bd24-de7b95588532\") " pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.497677 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: 
\"kubernetes.io/secret/21d6ff6a-e58c-43fc-bd24-de7b95588532-console-oauth-config\") pod \"console-5595cfd85-m9r2b\" (UID: \"21d6ff6a-e58c-43fc-bd24-de7b95588532\") " pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.497743 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/21d6ff6a-e58c-43fc-bd24-de7b95588532-console-config\") pod \"console-5595cfd85-m9r2b\" (UID: \"21d6ff6a-e58c-43fc-bd24-de7b95588532\") " pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.497815 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/21d6ff6a-e58c-43fc-bd24-de7b95588532-trusted-ca-bundle\") pod \"console-5595cfd85-m9r2b\" (UID: \"21d6ff6a-e58c-43fc-bd24-de7b95588532\") " pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.596606 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-hrdrc"] Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.599879 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/21d6ff6a-e58c-43fc-bd24-de7b95588532-trusted-ca-bundle\") pod \"console-5595cfd85-m9r2b\" (UID: \"21d6ff6a-e58c-43fc-bd24-de7b95588532\") " pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.600008 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/21d6ff6a-e58c-43fc-bd24-de7b95588532-console-serving-cert\") pod \"console-5595cfd85-m9r2b\" (UID: \"21d6ff6a-e58c-43fc-bd24-de7b95588532\") " pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.600081 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/21d6ff6a-e58c-43fc-bd24-de7b95588532-oauth-serving-cert\") pod \"console-5595cfd85-m9r2b\" (UID: \"21d6ff6a-e58c-43fc-bd24-de7b95588532\") " pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.600106 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfwlc\" (UniqueName: \"kubernetes.io/projected/21d6ff6a-e58c-43fc-bd24-de7b95588532-kube-api-access-qfwlc\") pod \"console-5595cfd85-m9r2b\" (UID: \"21d6ff6a-e58c-43fc-bd24-de7b95588532\") " pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.600166 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/21d6ff6a-e58c-43fc-bd24-de7b95588532-service-ca\") pod \"console-5595cfd85-m9r2b\" (UID: \"21d6ff6a-e58c-43fc-bd24-de7b95588532\") " pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.600202 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/21d6ff6a-e58c-43fc-bd24-de7b95588532-console-oauth-config\") pod \"console-5595cfd85-m9r2b\" (UID: \"21d6ff6a-e58c-43fc-bd24-de7b95588532\") " 
pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.600261 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/21d6ff6a-e58c-43fc-bd24-de7b95588532-console-config\") pod \"console-5595cfd85-m9r2b\" (UID: \"21d6ff6a-e58c-43fc-bd24-de7b95588532\") " pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.601455 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/21d6ff6a-e58c-43fc-bd24-de7b95588532-oauth-serving-cert\") pod \"console-5595cfd85-m9r2b\" (UID: \"21d6ff6a-e58c-43fc-bd24-de7b95588532\") " pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.601582 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/21d6ff6a-e58c-43fc-bd24-de7b95588532-service-ca\") pod \"console-5595cfd85-m9r2b\" (UID: \"21d6ff6a-e58c-43fc-bd24-de7b95588532\") " pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.601634 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/21d6ff6a-e58c-43fc-bd24-de7b95588532-console-config\") pod \"console-5595cfd85-m9r2b\" (UID: \"21d6ff6a-e58c-43fc-bd24-de7b95588532\") " pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.601691 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/21d6ff6a-e58c-43fc-bd24-de7b95588532-trusted-ca-bundle\") pod \"console-5595cfd85-m9r2b\" (UID: \"21d6ff6a-e58c-43fc-bd24-de7b95588532\") " pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.606705 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/21d6ff6a-e58c-43fc-bd24-de7b95588532-console-oauth-config\") pod \"console-5595cfd85-m9r2b\" (UID: \"21d6ff6a-e58c-43fc-bd24-de7b95588532\") " pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.608210 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/21d6ff6a-e58c-43fc-bd24-de7b95588532-console-serving-cert\") pod \"console-5595cfd85-m9r2b\" (UID: \"21d6ff6a-e58c-43fc-bd24-de7b95588532\") " pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.616896 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfwlc\" (UniqueName: \"kubernetes.io/projected/21d6ff6a-e58c-43fc-bd24-de7b95588532-kube-api-access-qfwlc\") pod \"console-5595cfd85-m9r2b\" (UID: \"21d6ff6a-e58c-43fc-bd24-de7b95588532\") " pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.653668 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qvrkm"] Nov 28 10:10:39 crc kubenswrapper[4838]: W1128 10:10:39.657808 4838 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2205c062_150d_43c9_8e91_9187c92a1908.slice/crio-f1f3bbfbd6bb73ff4643a1f5f127105b90d6e0d9e736c8a8cf7b6752cb099c38 WatchSource:0}: Error finding container f1f3bbfbd6bb73ff4643a1f5f127105b90d6e0d9e736c8a8cf7b6752cb099c38: Status 404 returned error can't find the container with id f1f3bbfbd6bb73ff4643a1f5f127105b90d6e0d9e736c8a8cf7b6752cb099c38 Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.720545 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-sgkvp" Nov 28 10:10:39 crc kubenswrapper[4838]: W1128 10:10:39.737524 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7d076e50_6cc5_4258_b334_faa9d4f1a3b4.slice/crio-9f49259c3dd10061fac177a8423c23f3596199788703835e732f71832c7c66d2 WatchSource:0}: Error finding container 9f49259c3dd10061fac177a8423c23f3596199788703835e732f71832c7c66d2: Status 404 returned error can't find the container with id 9f49259c3dd10061fac177a8423c23f3596199788703835e732f71832c7c66d2 Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.771557 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.803063 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/27f91860-f5ee-4232-b298-bf97137a1d12-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-pf4kk\" (UID: \"27f91860-f5ee-4232-b298-bf97137a1d12\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pf4kk" Nov 28 10:10:39 crc kubenswrapper[4838]: I1128 10:10:39.807886 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/27f91860-f5ee-4232-b298-bf97137a1d12-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-pf4kk\" (UID: \"27f91860-f5ee-4232-b298-bf97137a1d12\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pf4kk" Nov 28 10:10:40 crc kubenswrapper[4838]: I1128 10:10:40.004342 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pf4kk" Nov 28 10:10:40 crc kubenswrapper[4838]: I1128 10:10:40.184266 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5595cfd85-m9r2b"] Nov 28 10:10:40 crc kubenswrapper[4838]: W1128 10:10:40.205232 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod21d6ff6a_e58c_43fc_bd24_de7b95588532.slice/crio-84e03abfae25e9a2e85e1ac97c42aac1d579abe09cceb303b5e0dc102d4ac97e WatchSource:0}: Error finding container 84e03abfae25e9a2e85e1ac97c42aac1d579abe09cceb303b5e0dc102d4ac97e: Status 404 returned error can't find the container with id 84e03abfae25e9a2e85e1ac97c42aac1d579abe09cceb303b5e0dc102d4ac97e Nov 28 10:10:40 crc kubenswrapper[4838]: I1128 10:10:40.245754 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pf4kk"] Nov 28 10:10:40 crc kubenswrapper[4838]: I1128 10:10:40.260798 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-hrdrc" event={"ID":"1190df35-2195-49c1-abb5-1a5e11626ec4","Type":"ContainerStarted","Data":"aa753a5066b202f4e61c68c88a8138b4aa5ea98f329556c482cb15abf66e6df2"} Nov 28 10:10:40 crc kubenswrapper[4838]: I1128 10:10:40.262120 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-sgkvp" event={"ID":"7d076e50-6cc5-4258-b334-faa9d4f1a3b4","Type":"ContainerStarted","Data":"9f49259c3dd10061fac177a8423c23f3596199788703835e732f71832c7c66d2"} Nov 28 10:10:40 crc kubenswrapper[4838]: I1128 10:10:40.263006 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qvrkm" event={"ID":"2205c062-150d-43c9-8e91-9187c92a1908","Type":"ContainerStarted","Data":"f1f3bbfbd6bb73ff4643a1f5f127105b90d6e0d9e736c8a8cf7b6752cb099c38"} Nov 28 10:10:40 crc kubenswrapper[4838]: I1128 10:10:40.265628 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5595cfd85-m9r2b" event={"ID":"21d6ff6a-e58c-43fc-bd24-de7b95588532","Type":"ContainerStarted","Data":"84e03abfae25e9a2e85e1ac97c42aac1d579abe09cceb303b5e0dc102d4ac97e"} Nov 28 10:10:40 crc kubenswrapper[4838]: W1128 10:10:40.266006 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod27f91860_f5ee_4232_b298_bf97137a1d12.slice/crio-ae21d728bac3ddc9707ce4e46db19318625da6e44aa1372d63709195365f71d0 WatchSource:0}: Error finding container ae21d728bac3ddc9707ce4e46db19318625da6e44aa1372d63709195365f71d0: Status 404 returned error can't find the container with id ae21d728bac3ddc9707ce4e46db19318625da6e44aa1372d63709195365f71d0 Nov 28 10:10:41 crc kubenswrapper[4838]: I1128 10:10:41.252640 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6gmrz" Nov 28 10:10:41 crc kubenswrapper[4838]: I1128 10:10:41.279929 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pf4kk" event={"ID":"27f91860-f5ee-4232-b298-bf97137a1d12","Type":"ContainerStarted","Data":"ae21d728bac3ddc9707ce4e46db19318625da6e44aa1372d63709195365f71d0"} Nov 28 10:10:41 crc kubenswrapper[4838]: I1128 10:10:41.318159 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6gmrz" Nov 28 10:10:41 crc kubenswrapper[4838]: I1128 
10:10:41.492291 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6gmrz"] Nov 28 10:10:42 crc kubenswrapper[4838]: I1128 10:10:42.286455 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6gmrz" podUID="7a381223-ecef-429b-8233-90b9084c3f23" containerName="registry-server" containerID="cri-o://b4a3d5689e23be999f492e12de79ff26cfdeeb51a5f64829cf25643b38b69923" gracePeriod=2 Nov 28 10:10:43 crc kubenswrapper[4838]: I1128 10:10:43.294337 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5595cfd85-m9r2b" event={"ID":"21d6ff6a-e58c-43fc-bd24-de7b95588532","Type":"ContainerStarted","Data":"4da7827e493d3cbadfc1dea884cb2951ea6867983c5fca2d7643eb51252a36ac"} Nov 28 10:10:43 crc kubenswrapper[4838]: I1128 10:10:43.297788 4838 generic.go:334] "Generic (PLEG): container finished" podID="7a381223-ecef-429b-8233-90b9084c3f23" containerID="b4a3d5689e23be999f492e12de79ff26cfdeeb51a5f64829cf25643b38b69923" exitCode=0 Nov 28 10:10:43 crc kubenswrapper[4838]: I1128 10:10:43.297851 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gmrz" event={"ID":"7a381223-ecef-429b-8233-90b9084c3f23","Type":"ContainerDied","Data":"b4a3d5689e23be999f492e12de79ff26cfdeeb51a5f64829cf25643b38b69923"} Nov 28 10:10:43 crc kubenswrapper[4838]: I1128 10:10:43.311975 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-5595cfd85-m9r2b" podStartSLOduration=4.311952463 podStartE2EDuration="4.311952463s" podCreationTimestamp="2025-11-28 10:10:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:10:43.310790732 +0000 UTC m=+815.009764912" watchObservedRunningTime="2025-11-28 10:10:43.311952463 +0000 UTC m=+815.010926663" Nov 28 10:10:44 crc kubenswrapper[4838]: I1128 10:10:44.527948 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6gmrz" Nov 28 10:10:44 crc kubenswrapper[4838]: I1128 10:10:44.682406 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c8m6w\" (UniqueName: \"kubernetes.io/projected/7a381223-ecef-429b-8233-90b9084c3f23-kube-api-access-c8m6w\") pod \"7a381223-ecef-429b-8233-90b9084c3f23\" (UID: \"7a381223-ecef-429b-8233-90b9084c3f23\") " Nov 28 10:10:44 crc kubenswrapper[4838]: I1128 10:10:44.682841 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a381223-ecef-429b-8233-90b9084c3f23-catalog-content\") pod \"7a381223-ecef-429b-8233-90b9084c3f23\" (UID: \"7a381223-ecef-429b-8233-90b9084c3f23\") " Nov 28 10:10:44 crc kubenswrapper[4838]: I1128 10:10:44.682879 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a381223-ecef-429b-8233-90b9084c3f23-utilities\") pod \"7a381223-ecef-429b-8233-90b9084c3f23\" (UID: \"7a381223-ecef-429b-8233-90b9084c3f23\") " Nov 28 10:10:44 crc kubenswrapper[4838]: I1128 10:10:44.684176 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a381223-ecef-429b-8233-90b9084c3f23-utilities" (OuterVolumeSpecName: "utilities") pod "7a381223-ecef-429b-8233-90b9084c3f23" (UID: "7a381223-ecef-429b-8233-90b9084c3f23"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:10:44 crc kubenswrapper[4838]: I1128 10:10:44.686444 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a381223-ecef-429b-8233-90b9084c3f23-kube-api-access-c8m6w" (OuterVolumeSpecName: "kube-api-access-c8m6w") pod "7a381223-ecef-429b-8233-90b9084c3f23" (UID: "7a381223-ecef-429b-8233-90b9084c3f23"). InnerVolumeSpecName "kube-api-access-c8m6w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:10:44 crc kubenswrapper[4838]: I1128 10:10:44.784337 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c8m6w\" (UniqueName: \"kubernetes.io/projected/7a381223-ecef-429b-8233-90b9084c3f23-kube-api-access-c8m6w\") on node \"crc\" DevicePath \"\"" Nov 28 10:10:44 crc kubenswrapper[4838]: I1128 10:10:44.784366 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a381223-ecef-429b-8233-90b9084c3f23-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:10:44 crc kubenswrapper[4838]: I1128 10:10:44.794793 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a381223-ecef-429b-8233-90b9084c3f23-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7a381223-ecef-429b-8233-90b9084c3f23" (UID: "7a381223-ecef-429b-8233-90b9084c3f23"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:10:44 crc kubenswrapper[4838]: I1128 10:10:44.885732 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a381223-ecef-429b-8233-90b9084c3f23-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:10:45 crc kubenswrapper[4838]: I1128 10:10:45.313035 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-sgkvp" event={"ID":"7d076e50-6cc5-4258-b334-faa9d4f1a3b4","Type":"ContainerStarted","Data":"d97dd24d56598c741cd71546e116965b2b6089e8fbfa1e6db46983eee41af51f"} Nov 28 10:10:45 crc kubenswrapper[4838]: I1128 10:10:45.314325 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-sgkvp" Nov 28 10:10:45 crc kubenswrapper[4838]: I1128 10:10:45.314400 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qvrkm" event={"ID":"2205c062-150d-43c9-8e91-9187c92a1908","Type":"ContainerStarted","Data":"b78c5111e1f2714f1f8bd574da2dfcddfe0ab2711a923e2ee2682e89d83fa491"} Nov 28 10:10:45 crc kubenswrapper[4838]: I1128 10:10:45.317800 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-hrdrc" event={"ID":"1190df35-2195-49c1-abb5-1a5e11626ec4","Type":"ContainerStarted","Data":"b83d292cb09beb58b72e252b3041d5e4ced278288b576a1a0911630bb8d1edd3"} Nov 28 10:10:45 crc kubenswrapper[4838]: I1128 10:10:45.321562 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6gmrz" Nov 28 10:10:45 crc kubenswrapper[4838]: I1128 10:10:45.321568 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gmrz" event={"ID":"7a381223-ecef-429b-8233-90b9084c3f23","Type":"ContainerDied","Data":"b75eba6885f95366064884aac2aa331d03003c875bba41a0f23deaf12f462e5c"} Nov 28 10:10:45 crc kubenswrapper[4838]: I1128 10:10:45.321617 4838 scope.go:117] "RemoveContainer" containerID="b4a3d5689e23be999f492e12de79ff26cfdeeb51a5f64829cf25643b38b69923" Nov 28 10:10:45 crc kubenswrapper[4838]: I1128 10:10:45.323853 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pf4kk" event={"ID":"27f91860-f5ee-4232-b298-bf97137a1d12","Type":"ContainerStarted","Data":"ecfa1d864f96431ef173e9bd633637674ddb69fb8a591e52abdae036ce550360"} Nov 28 10:10:45 crc kubenswrapper[4838]: I1128 10:10:45.324091 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pf4kk" Nov 28 10:10:45 crc kubenswrapper[4838]: I1128 10:10:45.339548 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-sgkvp" podStartSLOduration=1.493123817 podStartE2EDuration="6.339528072s" podCreationTimestamp="2025-11-28 10:10:39 +0000 UTC" firstStartedPulling="2025-11-28 10:10:39.739313223 +0000 UTC m=+811.438287403" lastFinishedPulling="2025-11-28 10:10:44.585717488 +0000 UTC m=+816.284691658" observedRunningTime="2025-11-28 10:10:45.331365632 +0000 UTC m=+817.030339852" watchObservedRunningTime="2025-11-28 10:10:45.339528072 +0000 UTC m=+817.038502252" Nov 28 10:10:45 crc kubenswrapper[4838]: I1128 10:10:45.352608 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pf4kk" podStartSLOduration=2.027186059 
podStartE2EDuration="6.352594386s" podCreationTimestamp="2025-11-28 10:10:39 +0000 UTC" firstStartedPulling="2025-11-28 10:10:40.268743599 +0000 UTC m=+811.967717769" lastFinishedPulling="2025-11-28 10:10:44.594151926 +0000 UTC m=+816.293126096" observedRunningTime="2025-11-28 10:10:45.352133764 +0000 UTC m=+817.051107934" watchObservedRunningTime="2025-11-28 10:10:45.352594386 +0000 UTC m=+817.051568566" Nov 28 10:10:45 crc kubenswrapper[4838]: I1128 10:10:45.354533 4838 scope.go:117] "RemoveContainer" containerID="0f3c31ff24d803abe584b746fbb2bc0f584dbd8fee443cdf1a0c035afc565ff8" Nov 28 10:10:45 crc kubenswrapper[4838]: I1128 10:10:45.392055 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qvrkm" podStartSLOduration=1.4668241659999999 podStartE2EDuration="6.392027312s" podCreationTimestamp="2025-11-28 10:10:39 +0000 UTC" firstStartedPulling="2025-11-28 10:10:39.660180373 +0000 UTC m=+811.359154543" lastFinishedPulling="2025-11-28 10:10:44.585383529 +0000 UTC m=+816.284357689" observedRunningTime="2025-11-28 10:10:45.374324904 +0000 UTC m=+817.073299104" watchObservedRunningTime="2025-11-28 10:10:45.392027312 +0000 UTC m=+817.091001522" Nov 28 10:10:45 crc kubenswrapper[4838]: I1128 10:10:45.397805 4838 scope.go:117] "RemoveContainer" containerID="d3639154f25965c1974700c45e3ec6ae5ef2f736e635a915eff97c9d097e74c5" Nov 28 10:10:45 crc kubenswrapper[4838]: I1128 10:10:45.409454 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6gmrz"] Nov 28 10:10:45 crc kubenswrapper[4838]: I1128 10:10:45.412241 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6gmrz"] Nov 28 10:10:46 crc kubenswrapper[4838]: I1128 10:10:46.570010 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a381223-ecef-429b-8233-90b9084c3f23" path="/var/lib/kubelet/pods/7a381223-ecef-429b-8233-90b9084c3f23/volumes" Nov 28 10:10:47 crc kubenswrapper[4838]: I1128 10:10:47.348944 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-hrdrc" event={"ID":"1190df35-2195-49c1-abb5-1a5e11626ec4","Type":"ContainerStarted","Data":"eb514b89605965712ea86b69ab6bb35091dbc7375d7ac69c8a3498cf286f9552"} Nov 28 10:10:47 crc kubenswrapper[4838]: I1128 10:10:47.372893 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-hrdrc" podStartSLOduration=0.932751494 podStartE2EDuration="8.372871758s" podCreationTimestamp="2025-11-28 10:10:39 +0000 UTC" firstStartedPulling="2025-11-28 10:10:39.609620056 +0000 UTC m=+811.308594226" lastFinishedPulling="2025-11-28 10:10:47.04974032 +0000 UTC m=+818.748714490" observedRunningTime="2025-11-28 10:10:47.372345804 +0000 UTC m=+819.071320014" watchObservedRunningTime="2025-11-28 10:10:47.372871758 +0000 UTC m=+819.071845928" Nov 28 10:10:49 crc kubenswrapper[4838]: I1128 10:10:49.757562 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-sgkvp" Nov 28 10:10:49 crc kubenswrapper[4838]: I1128 10:10:49.773093 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:49 crc kubenswrapper[4838]: I1128 10:10:49.773150 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:49 crc kubenswrapper[4838]: 
I1128 10:10:49.782538 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:50 crc kubenswrapper[4838]: I1128 10:10:50.369316 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-5595cfd85-m9r2b" Nov 28 10:10:50 crc kubenswrapper[4838]: I1128 10:10:50.433153 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-lvtzk"] Nov 28 10:10:53 crc kubenswrapper[4838]: I1128 10:10:53.940842 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:10:53 crc kubenswrapper[4838]: I1128 10:10:53.941311 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:11:00 crc kubenswrapper[4838]: I1128 10:11:00.011755 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pf4kk" Nov 28 10:11:15 crc kubenswrapper[4838]: I1128 10:11:15.491600 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-lvtzk" podUID="9fb065c7-1402-4294-a8f6-f1aa662ecbb0" containerName="console" containerID="cri-o://9987b620f50ad69f71453bb0a5dcde6871fb95839426a7dff767e8460a8c56e9" gracePeriod=15 Nov 28 10:11:15 crc kubenswrapper[4838]: I1128 10:11:15.909894 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-lvtzk_9fb065c7-1402-4294-a8f6-f1aa662ecbb0/console/0.log" Nov 28 10:11:15 crc kubenswrapper[4838]: I1128 10:11:15.910225 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-lvtzk" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.002001 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-service-ca\") pod \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.002076 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-console-serving-cert\") pod \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.002139 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-trusted-ca-bundle\") pod \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.002207 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tdpxt\" (UniqueName: \"kubernetes.io/projected/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-kube-api-access-tdpxt\") pod \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.002243 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-console-oauth-config\") pod \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.003219 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-service-ca" (OuterVolumeSpecName: "service-ca") pod "9fb065c7-1402-4294-a8f6-f1aa662ecbb0" (UID: "9fb065c7-1402-4294-a8f6-f1aa662ecbb0"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.003239 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "9fb065c7-1402-4294-a8f6-f1aa662ecbb0" (UID: "9fb065c7-1402-4294-a8f6-f1aa662ecbb0"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.003385 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-oauth-serving-cert\") pod \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.003409 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-console-config\") pod \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\" (UID: \"9fb065c7-1402-4294-a8f6-f1aa662ecbb0\") " Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.003759 4838 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.003777 4838 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.004313 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "9fb065c7-1402-4294-a8f6-f1aa662ecbb0" (UID: "9fb065c7-1402-4294-a8f6-f1aa662ecbb0"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.004371 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-console-config" (OuterVolumeSpecName: "console-config") pod "9fb065c7-1402-4294-a8f6-f1aa662ecbb0" (UID: "9fb065c7-1402-4294-a8f6-f1aa662ecbb0"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.008713 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "9fb065c7-1402-4294-a8f6-f1aa662ecbb0" (UID: "9fb065c7-1402-4294-a8f6-f1aa662ecbb0"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.016442 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "9fb065c7-1402-4294-a8f6-f1aa662ecbb0" (UID: "9fb065c7-1402-4294-a8f6-f1aa662ecbb0"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.019071 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-kube-api-access-tdpxt" (OuterVolumeSpecName: "kube-api-access-tdpxt") pod "9fb065c7-1402-4294-a8f6-f1aa662ecbb0" (UID: "9fb065c7-1402-4294-a8f6-f1aa662ecbb0"). InnerVolumeSpecName "kube-api-access-tdpxt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.108053 4838 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.108099 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tdpxt\" (UniqueName: \"kubernetes.io/projected/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-kube-api-access-tdpxt\") on node \"crc\" DevicePath \"\"" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.108113 4838 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.108125 4838 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.108138 4838 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/9fb065c7-1402-4294-a8f6-f1aa662ecbb0-console-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.386755 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7"] Nov 28 10:11:16 crc kubenswrapper[4838]: E1128 10:11:16.387006 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a381223-ecef-429b-8233-90b9084c3f23" containerName="extract-utilities" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.387018 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a381223-ecef-429b-8233-90b9084c3f23" containerName="extract-utilities" Nov 28 10:11:16 crc kubenswrapper[4838]: E1128 10:11:16.387028 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a381223-ecef-429b-8233-90b9084c3f23" containerName="registry-server" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.387035 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a381223-ecef-429b-8233-90b9084c3f23" containerName="registry-server" Nov 28 10:11:16 crc kubenswrapper[4838]: E1128 10:11:16.387044 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a381223-ecef-429b-8233-90b9084c3f23" containerName="extract-content" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.387051 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a381223-ecef-429b-8233-90b9084c3f23" containerName="extract-content" Nov 28 10:11:16 crc kubenswrapper[4838]: E1128 10:11:16.387064 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fb065c7-1402-4294-a8f6-f1aa662ecbb0" containerName="console" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.387071 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fb065c7-1402-4294-a8f6-f1aa662ecbb0" containerName="console" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.387174 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="9fb065c7-1402-4294-a8f6-f1aa662ecbb0" containerName="console" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.387184 4838 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="7a381223-ecef-429b-8233-90b9084c3f23" containerName="registry-server" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.387984 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.390997 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.409078 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7"] Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.412348 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/29970a13-5b66-4cf6-8515-3b4bd570dd2f-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7\" (UID: \"29970a13-5b66-4cf6-8515-3b4bd570dd2f\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.412397 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/29970a13-5b66-4cf6-8515-3b4bd570dd2f-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7\" (UID: \"29970a13-5b66-4cf6-8515-3b4bd570dd2f\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.412446 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkwmt\" (UniqueName: \"kubernetes.io/projected/29970a13-5b66-4cf6-8515-3b4bd570dd2f-kube-api-access-nkwmt\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7\" (UID: \"29970a13-5b66-4cf6-8515-3b4bd570dd2f\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.513789 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/29970a13-5b66-4cf6-8515-3b4bd570dd2f-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7\" (UID: \"29970a13-5b66-4cf6-8515-3b4bd570dd2f\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.514298 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/29970a13-5b66-4cf6-8515-3b4bd570dd2f-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7\" (UID: \"29970a13-5b66-4cf6-8515-3b4bd570dd2f\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.514690 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkwmt\" (UniqueName: \"kubernetes.io/projected/29970a13-5b66-4cf6-8515-3b4bd570dd2f-kube-api-access-nkwmt\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7\" (UID: \"29970a13-5b66-4cf6-8515-3b4bd570dd2f\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7" Nov 28 10:11:16 crc 
kubenswrapper[4838]: I1128 10:11:16.514907 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/29970a13-5b66-4cf6-8515-3b4bd570dd2f-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7\" (UID: \"29970a13-5b66-4cf6-8515-3b4bd570dd2f\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.515300 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/29970a13-5b66-4cf6-8515-3b4bd570dd2f-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7\" (UID: \"29970a13-5b66-4cf6-8515-3b4bd570dd2f\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.530045 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkwmt\" (UniqueName: \"kubernetes.io/projected/29970a13-5b66-4cf6-8515-3b4bd570dd2f-kube-api-access-nkwmt\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7\" (UID: \"29970a13-5b66-4cf6-8515-3b4bd570dd2f\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.559867 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-lvtzk_9fb065c7-1402-4294-a8f6-f1aa662ecbb0/console/0.log" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.559949 4838 generic.go:334] "Generic (PLEG): container finished" podID="9fb065c7-1402-4294-a8f6-f1aa662ecbb0" containerID="9987b620f50ad69f71453bb0a5dcde6871fb95839426a7dff767e8460a8c56e9" exitCode=2 Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.559996 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-lvtzk" event={"ID":"9fb065c7-1402-4294-a8f6-f1aa662ecbb0","Type":"ContainerDied","Data":"9987b620f50ad69f71453bb0a5dcde6871fb95839426a7dff767e8460a8c56e9"} Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.559995 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-lvtzk" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.560035 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-lvtzk" event={"ID":"9fb065c7-1402-4294-a8f6-f1aa662ecbb0","Type":"ContainerDied","Data":"33bc225c6b26822596184d2d688100df66a5d4009c39bb68b8560cf61229a6af"} Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.560061 4838 scope.go:117] "RemoveContainer" containerID="9987b620f50ad69f71453bb0a5dcde6871fb95839426a7dff767e8460a8c56e9" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.583161 4838 scope.go:117] "RemoveContainer" containerID="9987b620f50ad69f71453bb0a5dcde6871fb95839426a7dff767e8460a8c56e9" Nov 28 10:11:16 crc kubenswrapper[4838]: E1128 10:11:16.583678 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9987b620f50ad69f71453bb0a5dcde6871fb95839426a7dff767e8460a8c56e9\": container with ID starting with 9987b620f50ad69f71453bb0a5dcde6871fb95839426a7dff767e8460a8c56e9 not found: ID does not exist" containerID="9987b620f50ad69f71453bb0a5dcde6871fb95839426a7dff767e8460a8c56e9" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.583832 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9987b620f50ad69f71453bb0a5dcde6871fb95839426a7dff767e8460a8c56e9"} err="failed to get container status \"9987b620f50ad69f71453bb0a5dcde6871fb95839426a7dff767e8460a8c56e9\": rpc error: code = NotFound desc = could not find container \"9987b620f50ad69f71453bb0a5dcde6871fb95839426a7dff767e8460a8c56e9\": container with ID starting with 9987b620f50ad69f71453bb0a5dcde6871fb95839426a7dff767e8460a8c56e9 not found: ID does not exist" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.586697 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-lvtzk"] Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.595785 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-lvtzk"] Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.701281 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7" Nov 28 10:11:16 crc kubenswrapper[4838]: I1128 10:11:16.909194 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7"] Nov 28 10:11:17 crc kubenswrapper[4838]: I1128 10:11:17.570629 4838 generic.go:334] "Generic (PLEG): container finished" podID="29970a13-5b66-4cf6-8515-3b4bd570dd2f" containerID="2f391cea21014a2f76df840e2b9882f5bc0bf5f114c49792ef3eaf15e1119302" exitCode=0 Nov 28 10:11:17 crc kubenswrapper[4838]: I1128 10:11:17.570700 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7" event={"ID":"29970a13-5b66-4cf6-8515-3b4bd570dd2f","Type":"ContainerDied","Data":"2f391cea21014a2f76df840e2b9882f5bc0bf5f114c49792ef3eaf15e1119302"} Nov 28 10:11:17 crc kubenswrapper[4838]: I1128 10:11:17.570744 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7" event={"ID":"29970a13-5b66-4cf6-8515-3b4bd570dd2f","Type":"ContainerStarted","Data":"3f90a08f369b6d1c49aa289a08a3af7224eb5d0646acd9f838846aa7f6f38d2c"} Nov 28 10:11:18 crc kubenswrapper[4838]: I1128 10:11:18.572580 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9fb065c7-1402-4294-a8f6-f1aa662ecbb0" path="/var/lib/kubelet/pods/9fb065c7-1402-4294-a8f6-f1aa662ecbb0/volumes" Nov 28 10:11:20 crc kubenswrapper[4838]: I1128 10:11:20.595308 4838 generic.go:334] "Generic (PLEG): container finished" podID="29970a13-5b66-4cf6-8515-3b4bd570dd2f" containerID="249574e90a33f60866457bdd88e9d338d6aea947d7e5b77c26025ac17376a3ce" exitCode=0 Nov 28 10:11:20 crc kubenswrapper[4838]: I1128 10:11:20.595401 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7" event={"ID":"29970a13-5b66-4cf6-8515-3b4bd570dd2f","Type":"ContainerDied","Data":"249574e90a33f60866457bdd88e9d338d6aea947d7e5b77c26025ac17376a3ce"} Nov 28 10:11:21 crc kubenswrapper[4838]: I1128 10:11:21.605366 4838 generic.go:334] "Generic (PLEG): container finished" podID="29970a13-5b66-4cf6-8515-3b4bd570dd2f" containerID="c3a90bb25e23394938ab8dd7fbcc7829b398a1b7bb5c2de7692c1a5a57ea2aba" exitCode=0 Nov 28 10:11:21 crc kubenswrapper[4838]: I1128 10:11:21.605441 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7" event={"ID":"29970a13-5b66-4cf6-8515-3b4bd570dd2f","Type":"ContainerDied","Data":"c3a90bb25e23394938ab8dd7fbcc7829b398a1b7bb5c2de7692c1a5a57ea2aba"} Nov 28 10:11:22 crc kubenswrapper[4838]: I1128 10:11:22.958093 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7" Nov 28 10:11:23 crc kubenswrapper[4838]: I1128 10:11:23.002069 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nkwmt\" (UniqueName: \"kubernetes.io/projected/29970a13-5b66-4cf6-8515-3b4bd570dd2f-kube-api-access-nkwmt\") pod \"29970a13-5b66-4cf6-8515-3b4bd570dd2f\" (UID: \"29970a13-5b66-4cf6-8515-3b4bd570dd2f\") " Nov 28 10:11:23 crc kubenswrapper[4838]: I1128 10:11:23.002132 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/29970a13-5b66-4cf6-8515-3b4bd570dd2f-util\") pod \"29970a13-5b66-4cf6-8515-3b4bd570dd2f\" (UID: \"29970a13-5b66-4cf6-8515-3b4bd570dd2f\") " Nov 28 10:11:23 crc kubenswrapper[4838]: I1128 10:11:23.002170 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/29970a13-5b66-4cf6-8515-3b4bd570dd2f-bundle\") pod \"29970a13-5b66-4cf6-8515-3b4bd570dd2f\" (UID: \"29970a13-5b66-4cf6-8515-3b4bd570dd2f\") " Nov 28 10:11:23 crc kubenswrapper[4838]: I1128 10:11:23.003768 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29970a13-5b66-4cf6-8515-3b4bd570dd2f-bundle" (OuterVolumeSpecName: "bundle") pod "29970a13-5b66-4cf6-8515-3b4bd570dd2f" (UID: "29970a13-5b66-4cf6-8515-3b4bd570dd2f"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:11:23 crc kubenswrapper[4838]: I1128 10:11:23.007067 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29970a13-5b66-4cf6-8515-3b4bd570dd2f-kube-api-access-nkwmt" (OuterVolumeSpecName: "kube-api-access-nkwmt") pod "29970a13-5b66-4cf6-8515-3b4bd570dd2f" (UID: "29970a13-5b66-4cf6-8515-3b4bd570dd2f"). InnerVolumeSpecName "kube-api-access-nkwmt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:11:23 crc kubenswrapper[4838]: I1128 10:11:23.027077 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29970a13-5b66-4cf6-8515-3b4bd570dd2f-util" (OuterVolumeSpecName: "util") pod "29970a13-5b66-4cf6-8515-3b4bd570dd2f" (UID: "29970a13-5b66-4cf6-8515-3b4bd570dd2f"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:11:23 crc kubenswrapper[4838]: I1128 10:11:23.102934 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nkwmt\" (UniqueName: \"kubernetes.io/projected/29970a13-5b66-4cf6-8515-3b4bd570dd2f-kube-api-access-nkwmt\") on node \"crc\" DevicePath \"\"" Nov 28 10:11:23 crc kubenswrapper[4838]: I1128 10:11:23.102963 4838 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/29970a13-5b66-4cf6-8515-3b4bd570dd2f-util\") on node \"crc\" DevicePath \"\"" Nov 28 10:11:23 crc kubenswrapper[4838]: I1128 10:11:23.102975 4838 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/29970a13-5b66-4cf6-8515-3b4bd570dd2f-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:11:23 crc kubenswrapper[4838]: I1128 10:11:23.624670 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7" event={"ID":"29970a13-5b66-4cf6-8515-3b4bd570dd2f","Type":"ContainerDied","Data":"3f90a08f369b6d1c49aa289a08a3af7224eb5d0646acd9f838846aa7f6f38d2c"} Nov 28 10:11:23 crc kubenswrapper[4838]: I1128 10:11:23.625089 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3f90a08f369b6d1c49aa289a08a3af7224eb5d0646acd9f838846aa7f6f38d2c" Nov 28 10:11:23 crc kubenswrapper[4838]: I1128 10:11:23.624848 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7" Nov 28 10:11:23 crc kubenswrapper[4838]: I1128 10:11:23.939885 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:11:23 crc kubenswrapper[4838]: I1128 10:11:23.939986 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.364786 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-84ddbcdd65-dqrq8"] Nov 28 10:11:35 crc kubenswrapper[4838]: E1128 10:11:35.365562 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29970a13-5b66-4cf6-8515-3b4bd570dd2f" containerName="pull" Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.365576 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="29970a13-5b66-4cf6-8515-3b4bd570dd2f" containerName="pull" Nov 28 10:11:35 crc kubenswrapper[4838]: E1128 10:11:35.365589 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29970a13-5b66-4cf6-8515-3b4bd570dd2f" containerName="extract" Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.365598 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="29970a13-5b66-4cf6-8515-3b4bd570dd2f" containerName="extract" Nov 28 10:11:35 crc kubenswrapper[4838]: E1128 10:11:35.365614 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29970a13-5b66-4cf6-8515-3b4bd570dd2f" containerName="util" Nov 28 10:11:35 
crc kubenswrapper[4838]: I1128 10:11:35.365623 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="29970a13-5b66-4cf6-8515-3b4bd570dd2f" containerName="util" Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.365774 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="29970a13-5b66-4cf6-8515-3b4bd570dd2f" containerName="extract" Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.366236 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-84ddbcdd65-dqrq8" Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.367825 4838 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.367966 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.368541 4838 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.368644 4838 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-6s5tl" Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.374287 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.389207 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-84ddbcdd65-dqrq8"] Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.458475 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3eb34f58-6fc9-4220-87a6-74090d0f1874-webhook-cert\") pod \"metallb-operator-controller-manager-84ddbcdd65-dqrq8\" (UID: \"3eb34f58-6fc9-4220-87a6-74090d0f1874\") " pod="metallb-system/metallb-operator-controller-manager-84ddbcdd65-dqrq8" Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.458533 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slzxr\" (UniqueName: \"kubernetes.io/projected/3eb34f58-6fc9-4220-87a6-74090d0f1874-kube-api-access-slzxr\") pod \"metallb-operator-controller-manager-84ddbcdd65-dqrq8\" (UID: \"3eb34f58-6fc9-4220-87a6-74090d0f1874\") " pod="metallb-system/metallb-operator-controller-manager-84ddbcdd65-dqrq8" Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.458568 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3eb34f58-6fc9-4220-87a6-74090d0f1874-apiservice-cert\") pod \"metallb-operator-controller-manager-84ddbcdd65-dqrq8\" (UID: \"3eb34f58-6fc9-4220-87a6-74090d0f1874\") " pod="metallb-system/metallb-operator-controller-manager-84ddbcdd65-dqrq8" Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.560345 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3eb34f58-6fc9-4220-87a6-74090d0f1874-webhook-cert\") pod \"metallb-operator-controller-manager-84ddbcdd65-dqrq8\" (UID: \"3eb34f58-6fc9-4220-87a6-74090d0f1874\") " pod="metallb-system/metallb-operator-controller-manager-84ddbcdd65-dqrq8" Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 
10:11:35.560410 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slzxr\" (UniqueName: \"kubernetes.io/projected/3eb34f58-6fc9-4220-87a6-74090d0f1874-kube-api-access-slzxr\") pod \"metallb-operator-controller-manager-84ddbcdd65-dqrq8\" (UID: \"3eb34f58-6fc9-4220-87a6-74090d0f1874\") " pod="metallb-system/metallb-operator-controller-manager-84ddbcdd65-dqrq8"
Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.560440 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3eb34f58-6fc9-4220-87a6-74090d0f1874-apiservice-cert\") pod \"metallb-operator-controller-manager-84ddbcdd65-dqrq8\" (UID: \"3eb34f58-6fc9-4220-87a6-74090d0f1874\") " pod="metallb-system/metallb-operator-controller-manager-84ddbcdd65-dqrq8"
Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.568267 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3eb34f58-6fc9-4220-87a6-74090d0f1874-apiservice-cert\") pod \"metallb-operator-controller-manager-84ddbcdd65-dqrq8\" (UID: \"3eb34f58-6fc9-4220-87a6-74090d0f1874\") " pod="metallb-system/metallb-operator-controller-manager-84ddbcdd65-dqrq8"
Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.572710 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3eb34f58-6fc9-4220-87a6-74090d0f1874-webhook-cert\") pod \"metallb-operator-controller-manager-84ddbcdd65-dqrq8\" (UID: \"3eb34f58-6fc9-4220-87a6-74090d0f1874\") " pod="metallb-system/metallb-operator-controller-manager-84ddbcdd65-dqrq8"
Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.593771 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slzxr\" (UniqueName: \"kubernetes.io/projected/3eb34f58-6fc9-4220-87a6-74090d0f1874-kube-api-access-slzxr\") pod \"metallb-operator-controller-manager-84ddbcdd65-dqrq8\" (UID: \"3eb34f58-6fc9-4220-87a6-74090d0f1874\") " pod="metallb-system/metallb-operator-controller-manager-84ddbcdd65-dqrq8"
Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.628287 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-5bf7d9b684-tdkmz"]
Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.629421 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-5bf7d9b684-tdkmz"
Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.632701 4838 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.632898 4838 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-l4xwm"
Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.633791 4838 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert"
Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.668488 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-5bf7d9b684-tdkmz"]
Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.690362 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-84ddbcdd65-dqrq8"
Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.773320 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbh7q\" (UniqueName: \"kubernetes.io/projected/4588ca95-c23d-4709-8160-ab77a17f858d-kube-api-access-nbh7q\") pod \"metallb-operator-webhook-server-5bf7d9b684-tdkmz\" (UID: \"4588ca95-c23d-4709-8160-ab77a17f858d\") " pod="metallb-system/metallb-operator-webhook-server-5bf7d9b684-tdkmz"
Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.773398 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4588ca95-c23d-4709-8160-ab77a17f858d-webhook-cert\") pod \"metallb-operator-webhook-server-5bf7d9b684-tdkmz\" (UID: \"4588ca95-c23d-4709-8160-ab77a17f858d\") " pod="metallb-system/metallb-operator-webhook-server-5bf7d9b684-tdkmz"
Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.773445 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4588ca95-c23d-4709-8160-ab77a17f858d-apiservice-cert\") pod \"metallb-operator-webhook-server-5bf7d9b684-tdkmz\" (UID: \"4588ca95-c23d-4709-8160-ab77a17f858d\") " pod="metallb-system/metallb-operator-webhook-server-5bf7d9b684-tdkmz"
Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.878814 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbh7q\" (UniqueName: \"kubernetes.io/projected/4588ca95-c23d-4709-8160-ab77a17f858d-kube-api-access-nbh7q\") pod \"metallb-operator-webhook-server-5bf7d9b684-tdkmz\" (UID: \"4588ca95-c23d-4709-8160-ab77a17f858d\") " pod="metallb-system/metallb-operator-webhook-server-5bf7d9b684-tdkmz"
Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.879188 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4588ca95-c23d-4709-8160-ab77a17f858d-webhook-cert\") pod \"metallb-operator-webhook-server-5bf7d9b684-tdkmz\" (UID: \"4588ca95-c23d-4709-8160-ab77a17f858d\") " pod="metallb-system/metallb-operator-webhook-server-5bf7d9b684-tdkmz"
Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.879238 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4588ca95-c23d-4709-8160-ab77a17f858d-apiservice-cert\") pod \"metallb-operator-webhook-server-5bf7d9b684-tdkmz\" (UID: \"4588ca95-c23d-4709-8160-ab77a17f858d\") " pod="metallb-system/metallb-operator-webhook-server-5bf7d9b684-tdkmz"
Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.884392 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4588ca95-c23d-4709-8160-ab77a17f858d-apiservice-cert\") pod \"metallb-operator-webhook-server-5bf7d9b684-tdkmz\" (UID: \"4588ca95-c23d-4709-8160-ab77a17f858d\") " pod="metallb-system/metallb-operator-webhook-server-5bf7d9b684-tdkmz"
Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.889548 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4588ca95-c23d-4709-8160-ab77a17f858d-webhook-cert\") pod \"metallb-operator-webhook-server-5bf7d9b684-tdkmz\" (UID: \"4588ca95-c23d-4709-8160-ab77a17f858d\") " pod="metallb-system/metallb-operator-webhook-server-5bf7d9b684-tdkmz"
Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.900005 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbh7q\" (UniqueName: \"kubernetes.io/projected/4588ca95-c23d-4709-8160-ab77a17f858d-kube-api-access-nbh7q\") pod \"metallb-operator-webhook-server-5bf7d9b684-tdkmz\" (UID: \"4588ca95-c23d-4709-8160-ab77a17f858d\") " pod="metallb-system/metallb-operator-webhook-server-5bf7d9b684-tdkmz"
Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.944998 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-5bf7d9b684-tdkmz"
Nov 28 10:11:35 crc kubenswrapper[4838]: I1128 10:11:35.998869 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-84ddbcdd65-dqrq8"]
Nov 28 10:11:36 crc kubenswrapper[4838]: W1128 10:11:36.013878 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3eb34f58_6fc9_4220_87a6_74090d0f1874.slice/crio-0052c98d64abc8617766cf4beed851d76865888d0e710175c6458e6c5ea5cef9 WatchSource:0}: Error finding container 0052c98d64abc8617766cf4beed851d76865888d0e710175c6458e6c5ea5cef9: Status 404 returned error can't find the container with id 0052c98d64abc8617766cf4beed851d76865888d0e710175c6458e6c5ea5cef9
Nov 28 10:11:36 crc kubenswrapper[4838]: I1128 10:11:36.405862 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-5bf7d9b684-tdkmz"]
Nov 28 10:11:36 crc kubenswrapper[4838]: W1128 10:11:36.420837 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4588ca95_c23d_4709_8160_ab77a17f858d.slice/crio-d59a9a9579868a67b36b36b6fbdc598cb98da6e2dea188898a47a59d9d765197 WatchSource:0}: Error finding container d59a9a9579868a67b36b36b6fbdc598cb98da6e2dea188898a47a59d9d765197: Status 404 returned error can't find the container with id d59a9a9579868a67b36b36b6fbdc598cb98da6e2dea188898a47a59d9d765197
Nov 28 10:11:36 crc kubenswrapper[4838]: I1128 10:11:36.712268 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-84ddbcdd65-dqrq8" event={"ID":"3eb34f58-6fc9-4220-87a6-74090d0f1874","Type":"ContainerStarted","Data":"0052c98d64abc8617766cf4beed851d76865888d0e710175c6458e6c5ea5cef9"}
Nov 28 10:11:36 crc kubenswrapper[4838]: I1128 10:11:36.713981 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-5bf7d9b684-tdkmz" event={"ID":"4588ca95-c23d-4709-8160-ab77a17f858d","Type":"ContainerStarted","Data":"d59a9a9579868a67b36b36b6fbdc598cb98da6e2dea188898a47a59d9d765197"}
Nov 28 10:11:42 crc kubenswrapper[4838]: I1128 10:11:42.000155 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-5bf7d9b684-tdkmz" event={"ID":"4588ca95-c23d-4709-8160-ab77a17f858d","Type":"ContainerStarted","Data":"d2db62914b717db6d9f3293c294d037a06f647c5dda72d7ffdef8053003541b7"}
Nov 28 10:11:42 crc kubenswrapper[4838]: I1128 10:11:42.000828 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-5bf7d9b684-tdkmz"
Nov 28 10:11:42 crc kubenswrapper[4838]: I1128 10:11:42.001693 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-84ddbcdd65-dqrq8" event={"ID":"3eb34f58-6fc9-4220-87a6-74090d0f1874","Type":"ContainerStarted","Data":"1bb1ba52fe0eea2811b1d436341d975db59a7c6c7f3cee550a5e1602d9187898"}
Nov 28 10:11:42 crc kubenswrapper[4838]: I1128 10:11:42.002487 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-84ddbcdd65-dqrq8"
Nov 28 10:11:42 crc kubenswrapper[4838]: I1128 10:11:42.034250 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-5bf7d9b684-tdkmz" podStartSLOduration=2.285384305 podStartE2EDuration="7.034233782s" podCreationTimestamp="2025-11-28 10:11:35 +0000 UTC" firstStartedPulling="2025-11-28 10:11:36.425844171 +0000 UTC m=+868.124818391" lastFinishedPulling="2025-11-28 10:11:41.174693688 +0000 UTC m=+872.873667868" observedRunningTime="2025-11-28 10:11:42.027648494 +0000 UTC m=+873.726622684" watchObservedRunningTime="2025-11-28 10:11:42.034233782 +0000 UTC m=+873.733207962"
Nov 28 10:11:42 crc kubenswrapper[4838]: I1128 10:11:42.059023 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-84ddbcdd65-dqrq8" podStartSLOduration=1.962554285 podStartE2EDuration="7.059007412s" podCreationTimestamp="2025-11-28 10:11:35 +0000 UTC" firstStartedPulling="2025-11-28 10:11:36.01644163 +0000 UTC m=+867.715415800" lastFinishedPulling="2025-11-28 10:11:41.112894737 +0000 UTC m=+872.811868927" observedRunningTime="2025-11-28 10:11:42.054924552 +0000 UTC m=+873.753898722" watchObservedRunningTime="2025-11-28 10:11:42.059007412 +0000 UTC m=+873.757981582"
Nov 28 10:11:53 crc kubenswrapper[4838]: I1128 10:11:53.940680 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 10:11:54 crc kubenswrapper[4838]: I1128 10:11:53.941619 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 10:11:54 crc kubenswrapper[4838]: I1128 10:11:53.941708 4838 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd"
Nov 28 10:11:54 crc kubenswrapper[4838]: I1128 10:11:53.942973 4838 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e44c3d5f2db51d0905366ef1f77dd84ac3c3e496e157394cd047f58af85a3fca"} pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 10:11:54 crc kubenswrapper[4838]: I1128 10:11:53.943144 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" containerID="cri-o://e44c3d5f2db51d0905366ef1f77dd84ac3c3e496e157394cd047f58af85a3fca" gracePeriod=600
Nov 28 10:11:55 crc kubenswrapper[4838]: I1128 10:11:55.094155 4838 generic.go:334] "Generic (PLEG): container finished" podID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerID="e44c3d5f2db51d0905366ef1f77dd84ac3c3e496e157394cd047f58af85a3fca" exitCode=0
Nov 28 10:11:55 crc kubenswrapper[4838]: I1128 10:11:55.094216 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerDied","Data":"e44c3d5f2db51d0905366ef1f77dd84ac3c3e496e157394cd047f58af85a3fca"}
Nov 28 10:11:55 crc kubenswrapper[4838]: I1128 10:11:55.094842 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerStarted","Data":"3ce7b45b9fd71f6cdec20d6a8542bb19cf78bbe5928e243b6058e07f9eb4cc79"}
Nov 28 10:11:55 crc kubenswrapper[4838]: I1128 10:11:55.094879 4838 scope.go:117] "RemoveContainer" containerID="1bdd37224d3ec9a4a6830c87550bdad8405cc7c49db8d4116d7a4a00dc4a3cf3"
Nov 28 10:11:55 crc kubenswrapper[4838]: I1128 10:11:55.983650 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-5bf7d9b684-tdkmz"
Nov 28 10:12:15 crc kubenswrapper[4838]: I1128 10:12:15.693605 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-84ddbcdd65-dqrq8"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.431008 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-8frbg"]
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.431953 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-8frbg"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.433743 4838 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.434031 4838 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-jr5fx"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.440945 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-hdb2b"]
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.443299 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.446897 4838 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.447018 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.448905 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-8frbg"]
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.533547 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-xrbxd"]
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.537591 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-xrbxd"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.539874 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-frr-startup\") pod \"frr-k8s-hdb2b\" (UID: \"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53\") " pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.539915 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-metrics\") pod \"frr-k8s-hdb2b\" (UID: \"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53\") " pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.539945 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rrpf\" (UniqueName: \"kubernetes.io/projected/8ef825e1-e9dc-4a29-94cd-722613098926-kube-api-access-4rrpf\") pod \"frr-k8s-webhook-server-7fcb986d4-8frbg\" (UID: \"8ef825e1-e9dc-4a29-94cd-722613098926\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-8frbg"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.539990 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-reloader\") pod \"frr-k8s-hdb2b\" (UID: \"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53\") " pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.540012 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-frr-sockets\") pod \"frr-k8s-hdb2b\" (UID: \"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53\") " pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.540031 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-metrics-certs\") pod \"frr-k8s-hdb2b\" (UID: \"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53\") " pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.540051 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-frr-conf\") pod \"frr-k8s-hdb2b\" (UID: \"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53\") " pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.540068 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8ef825e1-e9dc-4a29-94cd-722613098926-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-8frbg\" (UID: \"8ef825e1-e9dc-4a29-94cd-722613098926\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-8frbg"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.540165 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skck8\" (UniqueName: \"kubernetes.io/projected/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-kube-api-access-skck8\") pod \"frr-k8s-hdb2b\" (UID: \"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53\") " pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.542158 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.542196 4838 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.542376 4838 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-bls54"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.542427 4838 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.545051 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-f8648f98b-p4xws"]
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.547584 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-f8648f98b-p4xws"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.552683 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-p4xws"]
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.553587 4838 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.641384 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-reloader\") pod \"frr-k8s-hdb2b\" (UID: \"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53\") " pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.641458 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-frr-sockets\") pod \"frr-k8s-hdb2b\" (UID: \"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53\") " pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.641486 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-metrics-certs\") pod \"frr-k8s-hdb2b\" (UID: \"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53\") " pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.641515 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/7f764b03-48a1-45af-b406-40c60c1e912c-metallb-excludel2\") pod \"speaker-xrbxd\" (UID: \"7f764b03-48a1-45af-b406-40c60c1e912c\") " pod="metallb-system/speaker-xrbxd"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.641533 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-frr-conf\") pod \"frr-k8s-hdb2b\" (UID: \"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53\") " pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.641550 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8ef825e1-e9dc-4a29-94cd-722613098926-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-8frbg\" (UID: \"8ef825e1-e9dc-4a29-94cd-722613098926\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-8frbg"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.641573 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/7f764b03-48a1-45af-b406-40c60c1e912c-memberlist\") pod \"speaker-xrbxd\" (UID: \"7f764b03-48a1-45af-b406-40c60c1e912c\") " pod="metallb-system/speaker-xrbxd"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.641590 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h67pq\" (UniqueName: \"kubernetes.io/projected/7f764b03-48a1-45af-b406-40c60c1e912c-kube-api-access-h67pq\") pod \"speaker-xrbxd\" (UID: \"7f764b03-48a1-45af-b406-40c60c1e912c\") " pod="metallb-system/speaker-xrbxd"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.641609 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skck8\" (UniqueName: \"kubernetes.io/projected/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-kube-api-access-skck8\") pod \"frr-k8s-hdb2b\" (UID: \"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53\") " pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.641632 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ef168536-335c-417d-b9d5-7dc2affb0b62-metrics-certs\") pod \"controller-f8648f98b-p4xws\" (UID: \"ef168536-335c-417d-b9d5-7dc2affb0b62\") " pod="metallb-system/controller-f8648f98b-p4xws"
Nov 28 10:12:16 crc kubenswrapper[4838]: E1128 10:12:16.641639 4838 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.641646 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7f764b03-48a1-45af-b406-40c60c1e912c-metrics-certs\") pod \"speaker-xrbxd\" (UID: \"7f764b03-48a1-45af-b406-40c60c1e912c\") " pod="metallb-system/speaker-xrbxd"
Nov 28 10:12:16 crc kubenswrapper[4838]: E1128 10:12:16.641703 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-metrics-certs podName:9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53 nodeName:}" failed. No retries permitted until 2025-11-28 10:12:17.141686119 +0000 UTC m=+908.840660289 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-metrics-certs") pod "frr-k8s-hdb2b" (UID: "9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53") : secret "frr-k8s-certs-secret" not found
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.641853 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ef168536-335c-417d-b9d5-7dc2affb0b62-cert\") pod \"controller-f8648f98b-p4xws\" (UID: \"ef168536-335c-417d-b9d5-7dc2affb0b62\") " pod="metallb-system/controller-f8648f98b-p4xws"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.641886 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-frr-startup\") pod \"frr-k8s-hdb2b\" (UID: \"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53\") " pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.641880 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-reloader\") pod \"frr-k8s-hdb2b\" (UID: \"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53\") " pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.641945 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-metrics\") pod \"frr-k8s-hdb2b\" (UID: \"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53\") " pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.641951 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-frr-sockets\") pod \"frr-k8s-hdb2b\" (UID: \"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53\") " pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.641986 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rrpf\" (UniqueName: \"kubernetes.io/projected/8ef825e1-e9dc-4a29-94cd-722613098926-kube-api-access-4rrpf\") pod \"frr-k8s-webhook-server-7fcb986d4-8frbg\" (UID: \"8ef825e1-e9dc-4a29-94cd-722613098926\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-8frbg"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.642039 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5nz5\" (UniqueName: \"kubernetes.io/projected/ef168536-335c-417d-b9d5-7dc2affb0b62-kube-api-access-s5nz5\") pod \"controller-f8648f98b-p4xws\" (UID: \"ef168536-335c-417d-b9d5-7dc2affb0b62\") " pod="metallb-system/controller-f8648f98b-p4xws"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.642457 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-frr-conf\") pod \"frr-k8s-hdb2b\" (UID: \"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53\") " pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.642557 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-metrics\") pod \"frr-k8s-hdb2b\" (UID: \"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53\") " pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.643330 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-frr-startup\") pod \"frr-k8s-hdb2b\" (UID: \"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53\") " pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.651063 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8ef825e1-e9dc-4a29-94cd-722613098926-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-8frbg\" (UID: \"8ef825e1-e9dc-4a29-94cd-722613098926\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-8frbg"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.658371 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skck8\" (UniqueName: \"kubernetes.io/projected/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-kube-api-access-skck8\") pod \"frr-k8s-hdb2b\" (UID: \"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53\") " pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.661847 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rrpf\" (UniqueName: \"kubernetes.io/projected/8ef825e1-e9dc-4a29-94cd-722613098926-kube-api-access-4rrpf\") pod \"frr-k8s-webhook-server-7fcb986d4-8frbg\" (UID: \"8ef825e1-e9dc-4a29-94cd-722613098926\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-8frbg"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.743647 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7f764b03-48a1-45af-b406-40c60c1e912c-metrics-certs\") pod \"speaker-xrbxd\" (UID: \"7f764b03-48a1-45af-b406-40c60c1e912c\") " pod="metallb-system/speaker-xrbxd"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.743697 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ef168536-335c-417d-b9d5-7dc2affb0b62-metrics-certs\") pod \"controller-f8648f98b-p4xws\" (UID: \"ef168536-335c-417d-b9d5-7dc2affb0b62\") " pod="metallb-system/controller-f8648f98b-p4xws"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.743735 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ef168536-335c-417d-b9d5-7dc2affb0b62-cert\") pod \"controller-f8648f98b-p4xws\" (UID: \"ef168536-335c-417d-b9d5-7dc2affb0b62\") " pod="metallb-system/controller-f8648f98b-p4xws"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.743784 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5nz5\" (UniqueName: \"kubernetes.io/projected/ef168536-335c-417d-b9d5-7dc2affb0b62-kube-api-access-s5nz5\") pod \"controller-f8648f98b-p4xws\" (UID: \"ef168536-335c-417d-b9d5-7dc2affb0b62\") " pod="metallb-system/controller-f8648f98b-p4xws"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.743975 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/7f764b03-48a1-45af-b406-40c60c1e912c-metallb-excludel2\") pod \"speaker-xrbxd\" (UID: \"7f764b03-48a1-45af-b406-40c60c1e912c\") " pod="metallb-system/speaker-xrbxd"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.744000 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/7f764b03-48a1-45af-b406-40c60c1e912c-memberlist\") pod \"speaker-xrbxd\" (UID: \"7f764b03-48a1-45af-b406-40c60c1e912c\") " pod="metallb-system/speaker-xrbxd"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.744015 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h67pq\" (UniqueName: \"kubernetes.io/projected/7f764b03-48a1-45af-b406-40c60c1e912c-kube-api-access-h67pq\") pod \"speaker-xrbxd\" (UID: \"7f764b03-48a1-45af-b406-40c60c1e912c\") " pod="metallb-system/speaker-xrbxd"
Nov 28 10:12:16 crc kubenswrapper[4838]: E1128 10:12:16.744132 4838 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Nov 28 10:12:16 crc kubenswrapper[4838]: E1128 10:12:16.744192 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7f764b03-48a1-45af-b406-40c60c1e912c-memberlist podName:7f764b03-48a1-45af-b406-40c60c1e912c nodeName:}" failed. No retries permitted until 2025-11-28 10:12:17.244173751 +0000 UTC m=+908.943147921 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/7f764b03-48a1-45af-b406-40c60c1e912c-memberlist") pod "speaker-xrbxd" (UID: "7f764b03-48a1-45af-b406-40c60c1e912c") : secret "metallb-memberlist" not found
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.745017 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/7f764b03-48a1-45af-b406-40c60c1e912c-metallb-excludel2\") pod \"speaker-xrbxd\" (UID: \"7f764b03-48a1-45af-b406-40c60c1e912c\") " pod="metallb-system/speaker-xrbxd"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.745496 4838 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.747732 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7f764b03-48a1-45af-b406-40c60c1e912c-metrics-certs\") pod \"speaker-xrbxd\" (UID: \"7f764b03-48a1-45af-b406-40c60c1e912c\") " pod="metallb-system/speaker-xrbxd"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.747861 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ef168536-335c-417d-b9d5-7dc2affb0b62-metrics-certs\") pod \"controller-f8648f98b-p4xws\" (UID: \"ef168536-335c-417d-b9d5-7dc2affb0b62\") " pod="metallb-system/controller-f8648f98b-p4xws"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.758192 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-8frbg"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.760286 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ef168536-335c-417d-b9d5-7dc2affb0b62-cert\") pod \"controller-f8648f98b-p4xws\" (UID: \"ef168536-335c-417d-b9d5-7dc2affb0b62\") " pod="metallb-system/controller-f8648f98b-p4xws"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.772413 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5nz5\" (UniqueName: \"kubernetes.io/projected/ef168536-335c-417d-b9d5-7dc2affb0b62-kube-api-access-s5nz5\") pod \"controller-f8648f98b-p4xws\" (UID: \"ef168536-335c-417d-b9d5-7dc2affb0b62\") " pod="metallb-system/controller-f8648f98b-p4xws"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.773088 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h67pq\" (UniqueName: \"kubernetes.io/projected/7f764b03-48a1-45af-b406-40c60c1e912c-kube-api-access-h67pq\") pod \"speaker-xrbxd\" (UID: \"7f764b03-48a1-45af-b406-40c60c1e912c\") " pod="metallb-system/speaker-xrbxd"
Nov 28 10:12:16 crc kubenswrapper[4838]: I1128 10:12:16.867836 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-f8648f98b-p4xws"
Nov 28 10:12:17 crc kubenswrapper[4838]: I1128 10:12:17.057336 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-p4xws"]
Nov 28 10:12:17 crc kubenswrapper[4838]: I1128 10:12:17.149705 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-metrics-certs\") pod \"frr-k8s-hdb2b\" (UID: \"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53\") " pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:17 crc kubenswrapper[4838]: I1128 10:12:17.150352 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-8frbg"]
Nov 28 10:12:17 crc kubenswrapper[4838]: I1128 10:12:17.155522 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53-metrics-certs\") pod \"frr-k8s-hdb2b\" (UID: \"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53\") " pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:17 crc kubenswrapper[4838]: I1128 10:12:17.236200 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-8frbg" event={"ID":"8ef825e1-e9dc-4a29-94cd-722613098926","Type":"ContainerStarted","Data":"c9f137acd69ce21e979aa1a8c238264cca4ca5efe205e9847da51eec13933e00"}
Nov 28 10:12:17 crc kubenswrapper[4838]: I1128 10:12:17.237521 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-p4xws" event={"ID":"ef168536-335c-417d-b9d5-7dc2affb0b62","Type":"ContainerStarted","Data":"e94cf5d0a9007ab4eb2b41ec01799c00fd752c45e8aab7f6641e25d4b7aa486a"}
Nov 28 10:12:17 crc kubenswrapper[4838]: I1128 10:12:17.237568 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-p4xws" event={"ID":"ef168536-335c-417d-b9d5-7dc2affb0b62","Type":"ContainerStarted","Data":"c28ca434fe5d16a333ad230802072083bc67091446a38799d6c230c566004b89"}
Nov 28 10:12:17 crc kubenswrapper[4838]: I1128 10:12:17.251047 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/7f764b03-48a1-45af-b406-40c60c1e912c-memberlist\") pod \"speaker-xrbxd\" (UID: \"7f764b03-48a1-45af-b406-40c60c1e912c\") " pod="metallb-system/speaker-xrbxd"
Nov 28 10:12:17 crc kubenswrapper[4838]: E1128 10:12:17.251170 4838 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Nov 28 10:12:17 crc kubenswrapper[4838]: E1128 10:12:17.251523 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7f764b03-48a1-45af-b406-40c60c1e912c-memberlist podName:7f764b03-48a1-45af-b406-40c60c1e912c nodeName:}" failed. No retries permitted until 2025-11-28 10:12:18.251499921 +0000 UTC m=+909.950474111 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/7f764b03-48a1-45af-b406-40c60c1e912c-memberlist") pod "speaker-xrbxd" (UID: "7f764b03-48a1-45af-b406-40c60c1e912c") : secret "metallb-memberlist" not found
Nov 28 10:12:17 crc kubenswrapper[4838]: I1128 10:12:17.361112 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:18 crc kubenswrapper[4838]: I1128 10:12:18.245917 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-p4xws" event={"ID":"ef168536-335c-417d-b9d5-7dc2affb0b62","Type":"ContainerStarted","Data":"f552010edec2b3e519b054ec0475f2efc0c5047f7c274475de75ea5ac159c2a0"}
Nov 28 10:12:18 crc kubenswrapper[4838]: I1128 10:12:18.246316 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-f8648f98b-p4xws"
Nov 28 10:12:18 crc kubenswrapper[4838]: I1128 10:12:18.247066 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-hdb2b" event={"ID":"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53","Type":"ContainerStarted","Data":"520e673148c7efef3484da2a62544e531ca9cc91ca7f68cbe97b0962569f88fe"}
Nov 28 10:12:18 crc kubenswrapper[4838]: I1128 10:12:18.266133 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/7f764b03-48a1-45af-b406-40c60c1e912c-memberlist\") pod \"speaker-xrbxd\" (UID: \"7f764b03-48a1-45af-b406-40c60c1e912c\") " pod="metallb-system/speaker-xrbxd"
Nov 28 10:12:18 crc kubenswrapper[4838]: I1128 10:12:18.267501 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-f8648f98b-p4xws" podStartSLOduration=2.267470015 podStartE2EDuration="2.267470015s" podCreationTimestamp="2025-11-28 10:12:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:12:18.26353502 +0000 UTC m=+909.962509200" watchObservedRunningTime="2025-11-28 10:12:18.267470015 +0000 UTC m=+909.966444225"
Nov 28 10:12:18 crc kubenswrapper[4838]: I1128 10:12:18.270671 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/7f764b03-48a1-45af-b406-40c60c1e912c-memberlist\") pod \"speaker-xrbxd\" (UID: \"7f764b03-48a1-45af-b406-40c60c1e912c\") " pod="metallb-system/speaker-xrbxd"
Nov 28 10:12:18 crc kubenswrapper[4838]: I1128 10:12:18.355236 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-xrbxd"
Nov 28 10:12:19 crc kubenswrapper[4838]: I1128 10:12:19.259773 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-xrbxd" event={"ID":"7f764b03-48a1-45af-b406-40c60c1e912c","Type":"ContainerStarted","Data":"7ad312adc64972d9a113a84e316340a9662eda25a65ebf64ea0dd24007fd1756"}
Nov 28 10:12:19 crc kubenswrapper[4838]: I1128 10:12:19.260090 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-xrbxd" event={"ID":"7f764b03-48a1-45af-b406-40c60c1e912c","Type":"ContainerStarted","Data":"901efebf8bb656a93404a73ff721b2ac868b4909c0cbe4ec8247c688af0d5d8d"}
Nov 28 10:12:19 crc kubenswrapper[4838]: I1128 10:12:19.260104 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-xrbxd" event={"ID":"7f764b03-48a1-45af-b406-40c60c1e912c","Type":"ContainerStarted","Data":"507f1eb6e958024cc568c971082fdce10e894e1b00399045eb1554c7ec3bf828"}
Nov 28 10:12:19 crc kubenswrapper[4838]: I1128 10:12:19.260787 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-xrbxd"
Nov 28 10:12:19 crc kubenswrapper[4838]: I1128 10:12:19.290083 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-xrbxd" podStartSLOduration=3.290067479 podStartE2EDuration="3.290067479s" podCreationTimestamp="2025-11-28 10:12:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:12:19.28753928 +0000 UTC m=+910.986513460" watchObservedRunningTime="2025-11-28 10:12:19.290067479 +0000 UTC m=+910.989041649"
Nov 28 10:12:25 crc kubenswrapper[4838]: I1128 10:12:25.303915 4838 generic.go:334] "Generic (PLEG): container finished" podID="9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53" containerID="3762c9b58b7d1de44d41635d335e40fa6945d4838046cc96fac23e51ba274250" exitCode=0
Nov 28 10:12:25 crc kubenswrapper[4838]: I1128 10:12:25.303995 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-hdb2b" event={"ID":"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53","Type":"ContainerDied","Data":"3762c9b58b7d1de44d41635d335e40fa6945d4838046cc96fac23e51ba274250"}
Nov 28 10:12:25 crc kubenswrapper[4838]: I1128 10:12:25.307133 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-8frbg" event={"ID":"8ef825e1-e9dc-4a29-94cd-722613098926","Type":"ContainerStarted","Data":"dd5fe6931f9f986582ad9717ad9bdf64a8cff419a954e3f894ecaf30c6f3ddf9"}
Nov 28 10:12:25 crc kubenswrapper[4838]: I1128 10:12:25.307415 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-8frbg"
Nov 28 10:12:25 crc kubenswrapper[4838]: I1128 10:12:25.369337 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-8frbg" podStartSLOduration=2.2506307420000002 podStartE2EDuration="9.36931505s" podCreationTimestamp="2025-11-28 10:12:16 +0000 UTC" firstStartedPulling="2025-11-28 10:12:17.151728653 +0000 UTC m=+908.850702823" lastFinishedPulling="2025-11-28 10:12:24.270412921 +0000 UTC m=+915.969387131" observedRunningTime="2025-11-28 10:12:25.368902619 +0000 UTC m=+917.067876789" watchObservedRunningTime="2025-11-28 10:12:25.36931505 +0000 UTC m=+917.068289220"
Nov 28 10:12:26 crc kubenswrapper[4838]: I1128 10:12:26.319458 4838 generic.go:334] "Generic (PLEG): container finished" podID="9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53" containerID="30fce6a2485726ff3bc7b3b56bb284fa26696d363fbf1bee20859ab3c2214d4e" exitCode=0
Nov 28 10:12:26 crc kubenswrapper[4838]: I1128 10:12:26.319539 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-hdb2b" event={"ID":"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53","Type":"ContainerDied","Data":"30fce6a2485726ff3bc7b3b56bb284fa26696d363fbf1bee20859ab3c2214d4e"}
Nov 28 10:12:27 crc kubenswrapper[4838]: I1128 10:12:27.328056 4838 generic.go:334] "Generic (PLEG): container finished" podID="9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53" containerID="6a9c8656b9ee9de7af1625a6f494c6cfe23f6e22ee7625dade79b8a7df619833" exitCode=0
Nov 28 10:12:27 crc kubenswrapper[4838]: I1128 10:12:27.328113 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-hdb2b" event={"ID":"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53","Type":"ContainerDied","Data":"6a9c8656b9ee9de7af1625a6f494c6cfe23f6e22ee7625dade79b8a7df619833"}
Nov 28 10:12:28 crc kubenswrapper[4838]: I1128 10:12:28.341132 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-hdb2b" event={"ID":"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53","Type":"ContainerStarted","Data":"ba5c39f329814e7ef2d20a5a7317a9be26c743d57ef027d3353035a49cb0809a"}
Nov 28 10:12:28 crc kubenswrapper[4838]: I1128 10:12:28.341569 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-hdb2b" event={"ID":"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53","Type":"ContainerStarted","Data":"eca5653bde7a21255036a98c112b50d378c863b093cd4d69b61f446df18fe44c"}
Nov 28 10:12:28 crc kubenswrapper[4838]: I1128 10:12:28.341587 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-hdb2b" event={"ID":"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53","Type":"ContainerStarted","Data":"09b2eb0dde759e53121e0d0451b4a624364460b64de616b6111e9f041fd57371"}
Nov 28 10:12:28 crc kubenswrapper[4838]: I1128 10:12:28.359334 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-xrbxd"
Nov 28 10:12:29 crc kubenswrapper[4838]: I1128 10:12:29.351903 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-hdb2b" event={"ID":"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53","Type":"ContainerStarted","Data":"3cfd89da9f193a709af1a2ad045fa78e17ac44f1a3f77d599bda96dbf6e55f29"}
Nov 28 10:12:29 crc kubenswrapper[4838]: I1128 10:12:29.352279 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:29 crc kubenswrapper[4838]: I1128 10:12:29.352296 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-hdb2b" event={"ID":"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53","Type":"ContainerStarted","Data":"1d02678e02b016b00d508953cc9c00b74ef93c89ad11eadce23870c1f04d7753"}
Nov 28 10:12:29 crc kubenswrapper[4838]: I1128 10:12:29.352312 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-hdb2b" event={"ID":"9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53","Type":"ContainerStarted","Data":"629ea4cf3d8a9823530599c88e022910a6c768eea11547556dc4c703a0f38a1b"}
Nov 28 10:12:29 crc kubenswrapper[4838]: I1128 10:12:29.382797 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-hdb2b" podStartSLOduration=6.637106673 podStartE2EDuration="13.382774921s" podCreationTimestamp="2025-11-28 10:12:16 +0000 UTC" firstStartedPulling="2025-11-28 10:12:17.479593187 +0000 UTC m=+909.178567357" lastFinishedPulling="2025-11-28 10:12:24.225261435 +0000 UTC m=+915.924235605" observedRunningTime="2025-11-28 10:12:29.377295913 +0000 UTC m=+921.076270123" watchObservedRunningTime="2025-11-28 10:12:29.382774921 +0000 UTC m=+921.081749111"
Nov 28 10:12:31 crc kubenswrapper[4838]: I1128 10:12:31.242057 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-wwzv8"]
Nov 28 10:12:31 crc kubenswrapper[4838]: I1128 10:12:31.255498 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-wwzv8"]
Nov 28 10:12:31 crc kubenswrapper[4838]: I1128 10:12:31.255610 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-wwzv8"
Nov 28 10:12:31 crc kubenswrapper[4838]: I1128 10:12:31.259844 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45nkx\" (UniqueName: \"kubernetes.io/projected/3f11c928-71e8-45db-8780-9e8977aed314-kube-api-access-45nkx\") pod \"openstack-operator-index-wwzv8\" (UID: \"3f11c928-71e8-45db-8780-9e8977aed314\") " pod="openstack-operators/openstack-operator-index-wwzv8"
Nov 28 10:12:31 crc kubenswrapper[4838]: I1128 10:12:31.262646 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-b42rl"
Nov 28 10:12:31 crc kubenswrapper[4838]: I1128 10:12:31.270045 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Nov 28 10:12:31 crc kubenswrapper[4838]: I1128 10:12:31.270141 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Nov 28 10:12:31 crc kubenswrapper[4838]: I1128 10:12:31.360439 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45nkx\" (UniqueName: \"kubernetes.io/projected/3f11c928-71e8-45db-8780-9e8977aed314-kube-api-access-45nkx\") pod \"openstack-operator-index-wwzv8\" (UID: \"3f11c928-71e8-45db-8780-9e8977aed314\") " pod="openstack-operators/openstack-operator-index-wwzv8"
Nov 28 10:12:31 crc kubenswrapper[4838]: I1128 10:12:31.383546 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45nkx\" (UniqueName: \"kubernetes.io/projected/3f11c928-71e8-45db-8780-9e8977aed314-kube-api-access-45nkx\") pod \"openstack-operator-index-wwzv8\" (UID: \"3f11c928-71e8-45db-8780-9e8977aed314\") " pod="openstack-operators/openstack-operator-index-wwzv8"
Nov 28 10:12:31 crc kubenswrapper[4838]: I1128 10:12:31.578265 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-wwzv8"
Nov 28 10:12:32 crc kubenswrapper[4838]: I1128 10:12:32.068338 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-wwzv8"]
Nov 28 10:12:32 crc kubenswrapper[4838]: I1128 10:12:32.361864 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:32 crc kubenswrapper[4838]: I1128 10:12:32.373352 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-wwzv8" event={"ID":"3f11c928-71e8-45db-8780-9e8977aed314","Type":"ContainerStarted","Data":"62afe8c6a93d85bb8e2e9a1c21b1976d43fc338b14781b58c7a623ff6c77acef"}
Nov 28 10:12:32 crc kubenswrapper[4838]: I1128 10:12:32.437263 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:34 crc kubenswrapper[4838]: I1128 10:12:34.386906 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-wwzv8"]
Nov 28 10:12:34 crc kubenswrapper[4838]: I1128 10:12:34.997854 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-8j4q4"]
Nov 28 10:12:34 crc kubenswrapper[4838]: I1128 10:12:34.999025 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-8j4q4"
Nov 28 10:12:35 crc kubenswrapper[4838]: I1128 10:12:35.022372 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-8j4q4"]
Nov 28 10:12:35 crc kubenswrapper[4838]: I1128 10:12:35.117079 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wc45b\" (UniqueName: \"kubernetes.io/projected/c89c4ded-08e4-4d58-bb51-0f0e997b76a6-kube-api-access-wc45b\") pod \"openstack-operator-index-8j4q4\" (UID: \"c89c4ded-08e4-4d58-bb51-0f0e997b76a6\") " pod="openstack-operators/openstack-operator-index-8j4q4"
Nov 28 10:12:35 crc kubenswrapper[4838]: I1128 10:12:35.218624 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wc45b\" (UniqueName: \"kubernetes.io/projected/c89c4ded-08e4-4d58-bb51-0f0e997b76a6-kube-api-access-wc45b\") pod \"openstack-operator-index-8j4q4\" (UID: \"c89c4ded-08e4-4d58-bb51-0f0e997b76a6\") " pod="openstack-operators/openstack-operator-index-8j4q4"
Nov 28 10:12:35 crc kubenswrapper[4838]: I1128 10:12:35.241792 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wc45b\" (UniqueName: \"kubernetes.io/projected/c89c4ded-08e4-4d58-bb51-0f0e997b76a6-kube-api-access-wc45b\") pod \"openstack-operator-index-8j4q4\" (UID: \"c89c4ded-08e4-4d58-bb51-0f0e997b76a6\") " pod="openstack-operators/openstack-operator-index-8j4q4"
Nov 28 10:12:35 crc kubenswrapper[4838]: I1128 10:12:35.335562 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-8j4q4"
Nov 28 10:12:35 crc kubenswrapper[4838]: I1128 10:12:35.418437 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-wwzv8" event={"ID":"3f11c928-71e8-45db-8780-9e8977aed314","Type":"ContainerStarted","Data":"98457e5b872b4bad043f6a3e0f1af5ee07d75f7f0ca16c9843de69dff3df3c96"}
Nov 28 10:12:35 crc kubenswrapper[4838]: I1128 10:12:35.418584 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-wwzv8" podUID="3f11c928-71e8-45db-8780-9e8977aed314" containerName="registry-server" containerID="cri-o://98457e5b872b4bad043f6a3e0f1af5ee07d75f7f0ca16c9843de69dff3df3c96" gracePeriod=2
Nov 28 10:12:35 crc kubenswrapper[4838]: I1128 10:12:35.798979 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-wwzv8" podStartSLOduration=2.121444265 podStartE2EDuration="4.79895383s" podCreationTimestamp="2025-11-28 10:12:31 +0000 UTC" firstStartedPulling="2025-11-28 10:12:32.085478203 +0000 UTC m=+923.784452383" lastFinishedPulling="2025-11-28 10:12:34.762987738 +0000 UTC m=+926.461961948" observedRunningTime="2025-11-28 10:12:35.446526465 +0000 UTC m=+927.145500645" watchObservedRunningTime="2025-11-28 10:12:35.79895383 +0000 UTC m=+927.497928040"
Nov 28 10:12:35 crc kubenswrapper[4838]: I1128 10:12:35.803918 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-8j4q4"]
Nov 28 10:12:35 crc kubenswrapper[4838]: W1128 10:12:35.814916 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc89c4ded_08e4_4d58_bb51_0f0e997b76a6.slice/crio-18252fca6051c39ad7aef244c6f8ad52857a4650c292e94fcfda6cd89c3fe289 WatchSource:0}: Error finding container 18252fca6051c39ad7aef244c6f8ad52857a4650c292e94fcfda6cd89c3fe289: Status 404 returned error can't find the container with id 18252fca6051c39ad7aef244c6f8ad52857a4650c292e94fcfda6cd89c3fe289
Nov 28 10:12:36 crc kubenswrapper[4838]: I1128 10:12:36.429794 4838 generic.go:334] "Generic (PLEG): container finished" podID="3f11c928-71e8-45db-8780-9e8977aed314" containerID="98457e5b872b4bad043f6a3e0f1af5ee07d75f7f0ca16c9843de69dff3df3c96" exitCode=0
Nov 28 10:12:36 crc kubenswrapper[4838]: I1128 10:12:36.429924 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-wwzv8" event={"ID":"3f11c928-71e8-45db-8780-9e8977aed314","Type":"ContainerDied","Data":"98457e5b872b4bad043f6a3e0f1af5ee07d75f7f0ca16c9843de69dff3df3c96"}
Nov 28 10:12:36 crc kubenswrapper[4838]: I1128 10:12:36.432262 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8j4q4" event={"ID":"c89c4ded-08e4-4d58-bb51-0f0e997b76a6","Type":"ContainerStarted","Data":"18252fca6051c39ad7aef244c6f8ad52857a4650c292e94fcfda6cd89c3fe289"}
Nov 28 10:12:36 crc kubenswrapper[4838]: I1128 10:12:36.768550 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-8frbg"
Nov 28 10:12:36 crc kubenswrapper[4838]: I1128 10:12:36.880032 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-f8648f98b-p4xws"
Nov 28 10:12:37 crc kubenswrapper[4838]: I1128 10:12:37.029345 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-wwzv8"
Nov 28 10:12:37 crc kubenswrapper[4838]: I1128 10:12:37.047504 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45nkx\" (UniqueName: \"kubernetes.io/projected/3f11c928-71e8-45db-8780-9e8977aed314-kube-api-access-45nkx\") pod \"3f11c928-71e8-45db-8780-9e8977aed314\" (UID: \"3f11c928-71e8-45db-8780-9e8977aed314\") "
Nov 28 10:12:37 crc kubenswrapper[4838]: I1128 10:12:37.059450 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f11c928-71e8-45db-8780-9e8977aed314-kube-api-access-45nkx" (OuterVolumeSpecName: "kube-api-access-45nkx") pod "3f11c928-71e8-45db-8780-9e8977aed314" (UID: "3f11c928-71e8-45db-8780-9e8977aed314"). InnerVolumeSpecName "kube-api-access-45nkx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:12:37 crc kubenswrapper[4838]: I1128 10:12:37.148943 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45nkx\" (UniqueName: \"kubernetes.io/projected/3f11c928-71e8-45db-8780-9e8977aed314-kube-api-access-45nkx\") on node \"crc\" DevicePath \"\""
Nov 28 10:12:37 crc kubenswrapper[4838]: I1128 10:12:37.366662 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-hdb2b"
Nov 28 10:12:37 crc kubenswrapper[4838]: I1128 10:12:37.442427 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8j4q4" event={"ID":"c89c4ded-08e4-4d58-bb51-0f0e997b76a6","Type":"ContainerStarted","Data":"2bd038a94019fa9cf1b57516363cfa579f984fad78fefecd7d8a36e3c20f344d"}
Nov 28 10:12:37 crc kubenswrapper[4838]: I1128 10:12:37.444737 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-wwzv8" event={"ID":"3f11c928-71e8-45db-8780-9e8977aed314","Type":"ContainerDied","Data":"62afe8c6a93d85bb8e2e9a1c21b1976d43fc338b14781b58c7a623ff6c77acef"}
Nov 28 10:12:37 crc kubenswrapper[4838]: I1128 10:12:37.444833 4838 scope.go:117] "RemoveContainer" containerID="98457e5b872b4bad043f6a3e0f1af5ee07d75f7f0ca16c9843de69dff3df3c96"
Nov 28 10:12:37 crc kubenswrapper[4838]: I1128 10:12:37.444843 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-wwzv8"
Nov 28 10:12:37 crc kubenswrapper[4838]: I1128 10:12:37.472511 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-8j4q4" podStartSLOduration=2.737720915 podStartE2EDuration="3.472484393s" podCreationTimestamp="2025-11-28 10:12:34 +0000 UTC" firstStartedPulling="2025-11-28 10:12:35.81933403 +0000 UTC m=+927.518308240" lastFinishedPulling="2025-11-28 10:12:36.554097518 +0000 UTC m=+928.253071718" observedRunningTime="2025-11-28 10:12:37.463178203 +0000 UTC m=+929.162152383" watchObservedRunningTime="2025-11-28 10:12:37.472484393 +0000 UTC m=+929.171458603"
Nov 28 10:12:37 crc kubenswrapper[4838]: I1128 10:12:37.498365 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-wwzv8"]
Nov 28 10:12:37 crc kubenswrapper[4838]: I1128 10:12:37.506548 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-wwzv8"]
Nov 28 10:12:38 crc kubenswrapper[4838]: I1128 10:12:38.575529 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f11c928-71e8-45db-8780-9e8977aed314" path="/var/lib/kubelet/pods/3f11c928-71e8-45db-8780-9e8977aed314/volumes"
Nov 28 10:12:45 crc kubenswrapper[4838]: I1128 10:12:45.336197 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-8j4q4"
Nov 28 10:12:45 crc kubenswrapper[4838]: I1128 10:12:45.337340 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-8j4q4"
Nov 28 10:12:45 crc kubenswrapper[4838]: I1128 10:12:45.379366 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-8j4q4"
Nov 28 10:12:45 crc kubenswrapper[4838]: I1128 10:12:45.545205 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-8j4q4"
Nov 28 10:12:51 crc kubenswrapper[4838]: I1128 10:12:51.647050 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s"]
Nov 28 10:12:51 crc kubenswrapper[4838]: E1128 10:12:51.647921 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f11c928-71e8-45db-8780-9e8977aed314" containerName="registry-server"
Nov 28 10:12:51 crc kubenswrapper[4838]: I1128 10:12:51.647942 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f11c928-71e8-45db-8780-9e8977aed314" containerName="registry-server"
Nov 28 10:12:51 crc kubenswrapper[4838]: I1128 10:12:51.648121 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f11c928-71e8-45db-8780-9e8977aed314" containerName="registry-server"
Nov 28 10:12:51 crc kubenswrapper[4838]: I1128 10:12:51.649571 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s"
Nov 28 10:12:51 crc kubenswrapper[4838]: I1128 10:12:51.651876 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-wg5bs"
Nov 28 10:12:51 crc kubenswrapper[4838]: I1128 10:12:51.675249 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s"]
Nov 28 10:12:51 crc kubenswrapper[4838]: I1128 10:12:51.767230 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cd6q\" (UniqueName: \"kubernetes.io/projected/b2da9f3e-de9a-430d-887e-6b75fa6133d1-kube-api-access-6cd6q\") pod \"703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s\" (UID: \"b2da9f3e-de9a-430d-887e-6b75fa6133d1\") " pod="openstack-operators/703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s"
Nov 28 10:12:51 crc kubenswrapper[4838]: I1128 10:12:51.767324 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b2da9f3e-de9a-430d-887e-6b75fa6133d1-bundle\") pod \"703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s\" (UID: \"b2da9f3e-de9a-430d-887e-6b75fa6133d1\") " pod="openstack-operators/703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s"
Nov 28 10:12:51 crc kubenswrapper[4838]: I1128 10:12:51.767380 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b2da9f3e-de9a-430d-887e-6b75fa6133d1-util\") pod \"703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s\" (UID: \"b2da9f3e-de9a-430d-887e-6b75fa6133d1\") " pod="openstack-operators/703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s"
Nov 28 10:12:51 crc kubenswrapper[4838]: I1128 10:12:51.868520 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b2da9f3e-de9a-430d-887e-6b75fa6133d1-util\") pod \"703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s\" (UID: \"b2da9f3e-de9a-430d-887e-6b75fa6133d1\") " pod="openstack-operators/703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s"
Nov 28 10:12:51 crc kubenswrapper[4838]: I1128 10:12:51.868639 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6cd6q\" (UniqueName: \"kubernetes.io/projected/b2da9f3e-de9a-430d-887e-6b75fa6133d1-kube-api-access-6cd6q\") pod \"703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s\" (UID: \"b2da9f3e-de9a-430d-887e-6b75fa6133d1\") " pod="openstack-operators/703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s"
Nov 28 10:12:51 crc kubenswrapper[4838]: I1128 10:12:51.868776 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b2da9f3e-de9a-430d-887e-6b75fa6133d1-bundle\") pod \"703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s\" (UID: \"b2da9f3e-de9a-430d-887e-6b75fa6133d1\") " pod="openstack-operators/703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s"
Nov 28 10:12:51 crc kubenswrapper[4838]: I1128 10:12:51.869402 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b2da9f3e-de9a-430d-887e-6b75fa6133d1-util\") pod \"703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s\" (UID: \"b2da9f3e-de9a-430d-887e-6b75fa6133d1\") " pod="openstack-operators/703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s"
Nov 28 10:12:51 crc kubenswrapper[4838]: I1128 10:12:51.869495 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b2da9f3e-de9a-430d-887e-6b75fa6133d1-bundle\") pod \"703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s\" (UID: \"b2da9f3e-de9a-430d-887e-6b75fa6133d1\") " pod="openstack-operators/703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s"
Nov 28 10:12:51 crc kubenswrapper[4838]: I1128 10:12:51.902319 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6cd6q\" (UniqueName: \"kubernetes.io/projected/b2da9f3e-de9a-430d-887e-6b75fa6133d1-kube-api-access-6cd6q\") pod \"703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s\" (UID: \"b2da9f3e-de9a-430d-887e-6b75fa6133d1\") " pod="openstack-operators/703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s"
Nov 28 10:12:52 crc kubenswrapper[4838]: I1128 10:12:52.012528 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s"
Nov 28 10:12:52 crc kubenswrapper[4838]: I1128 10:12:52.489900 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s"]
Nov 28 10:12:52 crc kubenswrapper[4838]: I1128 10:12:52.586565 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s" event={"ID":"b2da9f3e-de9a-430d-887e-6b75fa6133d1","Type":"ContainerStarted","Data":"0e28d55ddd955177ae191451a6063eb4b7274bc3a6e09a97300d79b05c7959bc"}
Nov 28 10:12:53 crc kubenswrapper[4838]: I1128 10:12:53.005332 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fhhjv"]
Nov 28 10:12:53 crc kubenswrapper[4838]: I1128 10:12:53.007815 4838 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/community-operators-fhhjv" Nov 28 10:12:53 crc kubenswrapper[4838]: I1128 10:12:53.020982 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fhhjv"] Nov 28 10:12:53 crc kubenswrapper[4838]: I1128 10:12:53.192331 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f884d00-4676-401d-ba6a-69abf7e1b8a1-catalog-content\") pod \"community-operators-fhhjv\" (UID: \"1f884d00-4676-401d-ba6a-69abf7e1b8a1\") " pod="openshift-marketplace/community-operators-fhhjv" Nov 28 10:12:53 crc kubenswrapper[4838]: I1128 10:12:53.192402 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f884d00-4676-401d-ba6a-69abf7e1b8a1-utilities\") pod \"community-operators-fhhjv\" (UID: \"1f884d00-4676-401d-ba6a-69abf7e1b8a1\") " pod="openshift-marketplace/community-operators-fhhjv" Nov 28 10:12:53 crc kubenswrapper[4838]: I1128 10:12:53.192464 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzzng\" (UniqueName: \"kubernetes.io/projected/1f884d00-4676-401d-ba6a-69abf7e1b8a1-kube-api-access-lzzng\") pod \"community-operators-fhhjv\" (UID: \"1f884d00-4676-401d-ba6a-69abf7e1b8a1\") " pod="openshift-marketplace/community-operators-fhhjv" Nov 28 10:12:53 crc kubenswrapper[4838]: I1128 10:12:53.293665 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzzng\" (UniqueName: \"kubernetes.io/projected/1f884d00-4676-401d-ba6a-69abf7e1b8a1-kube-api-access-lzzng\") pod \"community-operators-fhhjv\" (UID: \"1f884d00-4676-401d-ba6a-69abf7e1b8a1\") " pod="openshift-marketplace/community-operators-fhhjv" Nov 28 10:12:53 crc kubenswrapper[4838]: I1128 10:12:53.293805 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f884d00-4676-401d-ba6a-69abf7e1b8a1-catalog-content\") pod \"community-operators-fhhjv\" (UID: \"1f884d00-4676-401d-ba6a-69abf7e1b8a1\") " pod="openshift-marketplace/community-operators-fhhjv" Nov 28 10:12:53 crc kubenswrapper[4838]: I1128 10:12:53.293859 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f884d00-4676-401d-ba6a-69abf7e1b8a1-utilities\") pod \"community-operators-fhhjv\" (UID: \"1f884d00-4676-401d-ba6a-69abf7e1b8a1\") " pod="openshift-marketplace/community-operators-fhhjv" Nov 28 10:12:53 crc kubenswrapper[4838]: I1128 10:12:53.294548 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f884d00-4676-401d-ba6a-69abf7e1b8a1-utilities\") pod \"community-operators-fhhjv\" (UID: \"1f884d00-4676-401d-ba6a-69abf7e1b8a1\") " pod="openshift-marketplace/community-operators-fhhjv" Nov 28 10:12:53 crc kubenswrapper[4838]: I1128 10:12:53.294601 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f884d00-4676-401d-ba6a-69abf7e1b8a1-catalog-content\") pod \"community-operators-fhhjv\" (UID: \"1f884d00-4676-401d-ba6a-69abf7e1b8a1\") " pod="openshift-marketplace/community-operators-fhhjv" Nov 28 10:12:53 crc kubenswrapper[4838]: I1128 10:12:53.317794 4838 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-lzzng\" (UniqueName: \"kubernetes.io/projected/1f884d00-4676-401d-ba6a-69abf7e1b8a1-kube-api-access-lzzng\") pod \"community-operators-fhhjv\" (UID: \"1f884d00-4676-401d-ba6a-69abf7e1b8a1\") " pod="openshift-marketplace/community-operators-fhhjv" Nov 28 10:12:53 crc kubenswrapper[4838]: I1128 10:12:53.383726 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fhhjv" Nov 28 10:12:53 crc kubenswrapper[4838]: I1128 10:12:53.584163 4838 generic.go:334] "Generic (PLEG): container finished" podID="b2da9f3e-de9a-430d-887e-6b75fa6133d1" containerID="c47fb7a64106ebbf9f46ada76fd7c0d27494b604ce7126f73fec8ea995479167" exitCode=0 Nov 28 10:12:53 crc kubenswrapper[4838]: I1128 10:12:53.584232 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s" event={"ID":"b2da9f3e-de9a-430d-887e-6b75fa6133d1","Type":"ContainerDied","Data":"c47fb7a64106ebbf9f46ada76fd7c0d27494b604ce7126f73fec8ea995479167"} Nov 28 10:12:53 crc kubenswrapper[4838]: I1128 10:12:53.730674 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fhhjv"] Nov 28 10:12:54 crc kubenswrapper[4838]: I1128 10:12:54.594327 4838 generic.go:334] "Generic (PLEG): container finished" podID="1f884d00-4676-401d-ba6a-69abf7e1b8a1" containerID="e50e8898fac71cad0c513587149f2666346ead5a128bb7eea016812550a06bac" exitCode=0 Nov 28 10:12:54 crc kubenswrapper[4838]: I1128 10:12:54.594419 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fhhjv" event={"ID":"1f884d00-4676-401d-ba6a-69abf7e1b8a1","Type":"ContainerDied","Data":"e50e8898fac71cad0c513587149f2666346ead5a128bb7eea016812550a06bac"} Nov 28 10:12:54 crc kubenswrapper[4838]: I1128 10:12:54.594788 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fhhjv" event={"ID":"1f884d00-4676-401d-ba6a-69abf7e1b8a1","Type":"ContainerStarted","Data":"a47d9395063b74ec729d92ba3418e4aedaae9ec56a25e0ac31a58d9af270f919"} Nov 28 10:12:54 crc kubenswrapper[4838]: I1128 10:12:54.597783 4838 generic.go:334] "Generic (PLEG): container finished" podID="b2da9f3e-de9a-430d-887e-6b75fa6133d1" containerID="be8757c3991dadb443d9602349b2f076cc29aeb56bd3e9aa05fa36278ff387ba" exitCode=0 Nov 28 10:12:54 crc kubenswrapper[4838]: I1128 10:12:54.597823 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s" event={"ID":"b2da9f3e-de9a-430d-887e-6b75fa6133d1","Type":"ContainerDied","Data":"be8757c3991dadb443d9602349b2f076cc29aeb56bd3e9aa05fa36278ff387ba"} Nov 28 10:12:55 crc kubenswrapper[4838]: I1128 10:12:55.611817 4838 generic.go:334] "Generic (PLEG): container finished" podID="b2da9f3e-de9a-430d-887e-6b75fa6133d1" containerID="8f2bd1de4fed12af10844725c8acf21fdec76947f1ee77b62e2d0ddbe1fb55c9" exitCode=0 Nov 28 10:12:55 crc kubenswrapper[4838]: I1128 10:12:55.611898 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s" event={"ID":"b2da9f3e-de9a-430d-887e-6b75fa6133d1","Type":"ContainerDied","Data":"8f2bd1de4fed12af10844725c8acf21fdec76947f1ee77b62e2d0ddbe1fb55c9"} Nov 28 10:12:56 crc kubenswrapper[4838]: I1128 10:12:56.623277 4838 generic.go:334] "Generic (PLEG): container finished" 
podID="1f884d00-4676-401d-ba6a-69abf7e1b8a1" containerID="e74b83cda413cf588743faab3d1dc144aea65afcb3abfb49f692867171a88723" exitCode=0 Nov 28 10:12:56 crc kubenswrapper[4838]: I1128 10:12:56.623441 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fhhjv" event={"ID":"1f884d00-4676-401d-ba6a-69abf7e1b8a1","Type":"ContainerDied","Data":"e74b83cda413cf588743faab3d1dc144aea65afcb3abfb49f692867171a88723"} Nov 28 10:12:56 crc kubenswrapper[4838]: I1128 10:12:56.955229 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s" Nov 28 10:12:57 crc kubenswrapper[4838]: I1128 10:12:57.156442 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6cd6q\" (UniqueName: \"kubernetes.io/projected/b2da9f3e-de9a-430d-887e-6b75fa6133d1-kube-api-access-6cd6q\") pod \"b2da9f3e-de9a-430d-887e-6b75fa6133d1\" (UID: \"b2da9f3e-de9a-430d-887e-6b75fa6133d1\") " Nov 28 10:12:57 crc kubenswrapper[4838]: I1128 10:12:57.158127 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b2da9f3e-de9a-430d-887e-6b75fa6133d1-util\") pod \"b2da9f3e-de9a-430d-887e-6b75fa6133d1\" (UID: \"b2da9f3e-de9a-430d-887e-6b75fa6133d1\") " Nov 28 10:12:57 crc kubenswrapper[4838]: I1128 10:12:57.158319 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b2da9f3e-de9a-430d-887e-6b75fa6133d1-bundle\") pod \"b2da9f3e-de9a-430d-887e-6b75fa6133d1\" (UID: \"b2da9f3e-de9a-430d-887e-6b75fa6133d1\") " Nov 28 10:12:57 crc kubenswrapper[4838]: I1128 10:12:57.160068 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2da9f3e-de9a-430d-887e-6b75fa6133d1-bundle" (OuterVolumeSpecName: "bundle") pod "b2da9f3e-de9a-430d-887e-6b75fa6133d1" (UID: "b2da9f3e-de9a-430d-887e-6b75fa6133d1"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:12:57 crc kubenswrapper[4838]: I1128 10:12:57.165748 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2da9f3e-de9a-430d-887e-6b75fa6133d1-kube-api-access-6cd6q" (OuterVolumeSpecName: "kube-api-access-6cd6q") pod "b2da9f3e-de9a-430d-887e-6b75fa6133d1" (UID: "b2da9f3e-de9a-430d-887e-6b75fa6133d1"). InnerVolumeSpecName "kube-api-access-6cd6q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:12:57 crc kubenswrapper[4838]: I1128 10:12:57.198817 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2da9f3e-de9a-430d-887e-6b75fa6133d1-util" (OuterVolumeSpecName: "util") pod "b2da9f3e-de9a-430d-887e-6b75fa6133d1" (UID: "b2da9f3e-de9a-430d-887e-6b75fa6133d1"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:12:57 crc kubenswrapper[4838]: I1128 10:12:57.260917 4838 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b2da9f3e-de9a-430d-887e-6b75fa6133d1-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:12:57 crc kubenswrapper[4838]: I1128 10:12:57.260963 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6cd6q\" (UniqueName: \"kubernetes.io/projected/b2da9f3e-de9a-430d-887e-6b75fa6133d1-kube-api-access-6cd6q\") on node \"crc\" DevicePath \"\"" Nov 28 10:12:57 crc kubenswrapper[4838]: I1128 10:12:57.260984 4838 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b2da9f3e-de9a-430d-887e-6b75fa6133d1-util\") on node \"crc\" DevicePath \"\"" Nov 28 10:12:57 crc kubenswrapper[4838]: I1128 10:12:57.632863 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fhhjv" event={"ID":"1f884d00-4676-401d-ba6a-69abf7e1b8a1","Type":"ContainerStarted","Data":"a43e5ef2e3c419091cd4bb307d35293f07ad87517243813652ea6b48bf1170b9"} Nov 28 10:12:57 crc kubenswrapper[4838]: I1128 10:12:57.636664 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s" event={"ID":"b2da9f3e-de9a-430d-887e-6b75fa6133d1","Type":"ContainerDied","Data":"0e28d55ddd955177ae191451a6063eb4b7274bc3a6e09a97300d79b05c7959bc"} Nov 28 10:12:57 crc kubenswrapper[4838]: I1128 10:12:57.636707 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0e28d55ddd955177ae191451a6063eb4b7274bc3a6e09a97300d79b05c7959bc" Nov 28 10:12:57 crc kubenswrapper[4838]: I1128 10:12:57.636769 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s" Nov 28 10:12:57 crc kubenswrapper[4838]: I1128 10:12:57.655059 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fhhjv" podStartSLOduration=3.128864434 podStartE2EDuration="5.655037279s" podCreationTimestamp="2025-11-28 10:12:52 +0000 UTC" firstStartedPulling="2025-11-28 10:12:54.59653065 +0000 UTC m=+946.295504860" lastFinishedPulling="2025-11-28 10:12:57.122703505 +0000 UTC m=+948.821677705" observedRunningTime="2025-11-28 10:12:57.652096729 +0000 UTC m=+949.351070919" watchObservedRunningTime="2025-11-28 10:12:57.655037279 +0000 UTC m=+949.354011459" Nov 28 10:13:01 crc kubenswrapper[4838]: I1128 10:13:01.713292 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5d66f99678-q8k4k"] Nov 28 10:13:01 crc kubenswrapper[4838]: E1128 10:13:01.714132 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2da9f3e-de9a-430d-887e-6b75fa6133d1" containerName="util" Nov 28 10:13:01 crc kubenswrapper[4838]: I1128 10:13:01.714148 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2da9f3e-de9a-430d-887e-6b75fa6133d1" containerName="util" Nov 28 10:13:01 crc kubenswrapper[4838]: E1128 10:13:01.714168 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2da9f3e-de9a-430d-887e-6b75fa6133d1" containerName="extract" Nov 28 10:13:01 crc kubenswrapper[4838]: I1128 10:13:01.714175 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2da9f3e-de9a-430d-887e-6b75fa6133d1" containerName="extract" Nov 28 10:13:01 crc kubenswrapper[4838]: E1128 10:13:01.714188 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2da9f3e-de9a-430d-887e-6b75fa6133d1" containerName="pull" Nov 28 10:13:01 crc kubenswrapper[4838]: I1128 10:13:01.714197 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2da9f3e-de9a-430d-887e-6b75fa6133d1" containerName="pull" Nov 28 10:13:01 crc kubenswrapper[4838]: I1128 10:13:01.714358 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2da9f3e-de9a-430d-887e-6b75fa6133d1" containerName="extract" Nov 28 10:13:01 crc kubenswrapper[4838]: I1128 10:13:01.714820 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5d66f99678-q8k4k" Nov 28 10:13:01 crc kubenswrapper[4838]: I1128 10:13:01.717951 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-rrrk7" Nov 28 10:13:01 crc kubenswrapper[4838]: I1128 10:13:01.751199 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5d66f99678-q8k4k"] Nov 28 10:13:01 crc kubenswrapper[4838]: I1128 10:13:01.824504 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8l4h8\" (UniqueName: \"kubernetes.io/projected/50baf569-8340-4264-8e08-28049728c9ad-kube-api-access-8l4h8\") pod \"openstack-operator-controller-operator-5d66f99678-q8k4k\" (UID: \"50baf569-8340-4264-8e08-28049728c9ad\") " pod="openstack-operators/openstack-operator-controller-operator-5d66f99678-q8k4k" Nov 28 10:13:01 crc kubenswrapper[4838]: I1128 10:13:01.925746 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8l4h8\" (UniqueName: \"kubernetes.io/projected/50baf569-8340-4264-8e08-28049728c9ad-kube-api-access-8l4h8\") pod \"openstack-operator-controller-operator-5d66f99678-q8k4k\" (UID: \"50baf569-8340-4264-8e08-28049728c9ad\") " pod="openstack-operators/openstack-operator-controller-operator-5d66f99678-q8k4k" Nov 28 10:13:01 crc kubenswrapper[4838]: I1128 10:13:01.958333 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8l4h8\" (UniqueName: \"kubernetes.io/projected/50baf569-8340-4264-8e08-28049728c9ad-kube-api-access-8l4h8\") pod \"openstack-operator-controller-operator-5d66f99678-q8k4k\" (UID: \"50baf569-8340-4264-8e08-28049728c9ad\") " pod="openstack-operators/openstack-operator-controller-operator-5d66f99678-q8k4k" Nov 28 10:13:02 crc kubenswrapper[4838]: I1128 10:13:02.035127 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5d66f99678-q8k4k" Nov 28 10:13:02 crc kubenswrapper[4838]: I1128 10:13:02.257465 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5d66f99678-q8k4k"] Nov 28 10:13:02 crc kubenswrapper[4838]: I1128 10:13:02.674641 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5d66f99678-q8k4k" event={"ID":"50baf569-8340-4264-8e08-28049728c9ad","Type":"ContainerStarted","Data":"0c4510bcfab9d0cc75edc567727190b8595156bba0ec914303857fd9671fe8cf"} Nov 28 10:13:03 crc kubenswrapper[4838]: I1128 10:13:03.385397 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fhhjv" Nov 28 10:13:03 crc kubenswrapper[4838]: I1128 10:13:03.385438 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fhhjv" Nov 28 10:13:03 crc kubenswrapper[4838]: I1128 10:13:03.434153 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fhhjv" Nov 28 10:13:03 crc kubenswrapper[4838]: I1128 10:13:03.736257 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fhhjv" Nov 28 10:13:05 crc kubenswrapper[4838]: I1128 10:13:05.787545 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fhhjv"] Nov 28 10:13:05 crc kubenswrapper[4838]: I1128 10:13:05.788168 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fhhjv" podUID="1f884d00-4676-401d-ba6a-69abf7e1b8a1" containerName="registry-server" containerID="cri-o://a43e5ef2e3c419091cd4bb307d35293f07ad87517243813652ea6b48bf1170b9" gracePeriod=2 Nov 28 10:13:06 crc kubenswrapper[4838]: I1128 10:13:06.700526 4838 generic.go:334] "Generic (PLEG): container finished" podID="1f884d00-4676-401d-ba6a-69abf7e1b8a1" containerID="a43e5ef2e3c419091cd4bb307d35293f07ad87517243813652ea6b48bf1170b9" exitCode=0 Nov 28 10:13:06 crc kubenswrapper[4838]: I1128 10:13:06.700573 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fhhjv" event={"ID":"1f884d00-4676-401d-ba6a-69abf7e1b8a1","Type":"ContainerDied","Data":"a43e5ef2e3c419091cd4bb307d35293f07ad87517243813652ea6b48bf1170b9"} Nov 28 10:13:07 crc kubenswrapper[4838]: I1128 10:13:07.867780 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fhhjv" Nov 28 10:13:08 crc kubenswrapper[4838]: I1128 10:13:08.032898 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f884d00-4676-401d-ba6a-69abf7e1b8a1-utilities\") pod \"1f884d00-4676-401d-ba6a-69abf7e1b8a1\" (UID: \"1f884d00-4676-401d-ba6a-69abf7e1b8a1\") " Nov 28 10:13:08 crc kubenswrapper[4838]: I1128 10:13:08.032998 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzzng\" (UniqueName: \"kubernetes.io/projected/1f884d00-4676-401d-ba6a-69abf7e1b8a1-kube-api-access-lzzng\") pod \"1f884d00-4676-401d-ba6a-69abf7e1b8a1\" (UID: \"1f884d00-4676-401d-ba6a-69abf7e1b8a1\") " Nov 28 10:13:08 crc kubenswrapper[4838]: I1128 10:13:08.033055 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f884d00-4676-401d-ba6a-69abf7e1b8a1-catalog-content\") pod \"1f884d00-4676-401d-ba6a-69abf7e1b8a1\" (UID: \"1f884d00-4676-401d-ba6a-69abf7e1b8a1\") " Nov 28 10:13:08 crc kubenswrapper[4838]: I1128 10:13:08.034019 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f884d00-4676-401d-ba6a-69abf7e1b8a1-utilities" (OuterVolumeSpecName: "utilities") pod "1f884d00-4676-401d-ba6a-69abf7e1b8a1" (UID: "1f884d00-4676-401d-ba6a-69abf7e1b8a1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:13:08 crc kubenswrapper[4838]: I1128 10:13:08.049395 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f884d00-4676-401d-ba6a-69abf7e1b8a1-kube-api-access-lzzng" (OuterVolumeSpecName: "kube-api-access-lzzng") pod "1f884d00-4676-401d-ba6a-69abf7e1b8a1" (UID: "1f884d00-4676-401d-ba6a-69abf7e1b8a1"). InnerVolumeSpecName "kube-api-access-lzzng". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:13:08 crc kubenswrapper[4838]: I1128 10:13:08.093161 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f884d00-4676-401d-ba6a-69abf7e1b8a1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1f884d00-4676-401d-ba6a-69abf7e1b8a1" (UID: "1f884d00-4676-401d-ba6a-69abf7e1b8a1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:13:08 crc kubenswrapper[4838]: I1128 10:13:08.134528 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzzng\" (UniqueName: \"kubernetes.io/projected/1f884d00-4676-401d-ba6a-69abf7e1b8a1-kube-api-access-lzzng\") on node \"crc\" DevicePath \"\"" Nov 28 10:13:08 crc kubenswrapper[4838]: I1128 10:13:08.134566 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f884d00-4676-401d-ba6a-69abf7e1b8a1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:13:08 crc kubenswrapper[4838]: I1128 10:13:08.134578 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f884d00-4676-401d-ba6a-69abf7e1b8a1-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:13:08 crc kubenswrapper[4838]: I1128 10:13:08.716146 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fhhjv" event={"ID":"1f884d00-4676-401d-ba6a-69abf7e1b8a1","Type":"ContainerDied","Data":"a47d9395063b74ec729d92ba3418e4aedaae9ec56a25e0ac31a58d9af270f919"} Nov 28 10:13:08 crc kubenswrapper[4838]: I1128 10:13:08.716466 4838 scope.go:117] "RemoveContainer" containerID="a43e5ef2e3c419091cd4bb307d35293f07ad87517243813652ea6b48bf1170b9" Nov 28 10:13:08 crc kubenswrapper[4838]: I1128 10:13:08.716634 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fhhjv" Nov 28 10:13:08 crc kubenswrapper[4838]: I1128 10:13:08.720827 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5d66f99678-q8k4k" event={"ID":"50baf569-8340-4264-8e08-28049728c9ad","Type":"ContainerStarted","Data":"c7c86491be7ef90ec0332743ff6201d5a6e197b4ac0e6f4944305118efd8e537"} Nov 28 10:13:08 crc kubenswrapper[4838]: I1128 10:13:08.721029 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-5d66f99678-q8k4k" Nov 28 10:13:08 crc kubenswrapper[4838]: I1128 10:13:08.734373 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fhhjv"] Nov 28 10:13:08 crc kubenswrapper[4838]: I1128 10:13:08.734815 4838 scope.go:117] "RemoveContainer" containerID="e74b83cda413cf588743faab3d1dc144aea65afcb3abfb49f692867171a88723" Nov 28 10:13:08 crc kubenswrapper[4838]: I1128 10:13:08.740692 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fhhjv"] Nov 28 10:13:08 crc kubenswrapper[4838]: I1128 10:13:08.751079 4838 scope.go:117] "RemoveContainer" containerID="e50e8898fac71cad0c513587149f2666346ead5a128bb7eea016812550a06bac" Nov 28 10:13:08 crc kubenswrapper[4838]: I1128 10:13:08.770173 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-5d66f99678-q8k4k" podStartSLOduration=1.8911908739999999 podStartE2EDuration="7.770155709s" podCreationTimestamp="2025-11-28 10:13:01 +0000 UTC" firstStartedPulling="2025-11-28 10:13:02.271883407 +0000 UTC m=+953.970857597" lastFinishedPulling="2025-11-28 10:13:08.150848262 +0000 UTC m=+959.849822432" observedRunningTime="2025-11-28 10:13:08.768465513 +0000 UTC m=+960.467439713" watchObservedRunningTime="2025-11-28 10:13:08.770155709 +0000 UTC m=+960.469129899" Nov 28 10:13:10 crc kubenswrapper[4838]: I1128 
10:13:10.577508 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f884d00-4676-401d-ba6a-69abf7e1b8a1" path="/var/lib/kubelet/pods/1f884d00-4676-401d-ba6a-69abf7e1b8a1/volumes" Nov 28 10:13:10 crc kubenswrapper[4838]: I1128 10:13:10.993474 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7qqnm"] Nov 28 10:13:10 crc kubenswrapper[4838]: E1128 10:13:10.993702 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f884d00-4676-401d-ba6a-69abf7e1b8a1" containerName="extract-utilities" Nov 28 10:13:10 crc kubenswrapper[4838]: I1128 10:13:10.993733 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f884d00-4676-401d-ba6a-69abf7e1b8a1" containerName="extract-utilities" Nov 28 10:13:10 crc kubenswrapper[4838]: E1128 10:13:10.993740 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f884d00-4676-401d-ba6a-69abf7e1b8a1" containerName="extract-content" Nov 28 10:13:10 crc kubenswrapper[4838]: I1128 10:13:10.993746 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f884d00-4676-401d-ba6a-69abf7e1b8a1" containerName="extract-content" Nov 28 10:13:10 crc kubenswrapper[4838]: E1128 10:13:10.993759 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f884d00-4676-401d-ba6a-69abf7e1b8a1" containerName="registry-server" Nov 28 10:13:10 crc kubenswrapper[4838]: I1128 10:13:10.993765 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f884d00-4676-401d-ba6a-69abf7e1b8a1" containerName="registry-server" Nov 28 10:13:10 crc kubenswrapper[4838]: I1128 10:13:10.993869 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f884d00-4676-401d-ba6a-69abf7e1b8a1" containerName="registry-server" Nov 28 10:13:10 crc kubenswrapper[4838]: I1128 10:13:10.994605 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7qqnm" Nov 28 10:13:11 crc kubenswrapper[4838]: I1128 10:13:11.017493 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7qqnm"] Nov 28 10:13:11 crc kubenswrapper[4838]: I1128 10:13:11.171739 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c89f373-4419-44d3-8339-7c41bb248c63-utilities\") pod \"certified-operators-7qqnm\" (UID: \"3c89f373-4419-44d3-8339-7c41bb248c63\") " pod="openshift-marketplace/certified-operators-7qqnm" Nov 28 10:13:11 crc kubenswrapper[4838]: I1128 10:13:11.171779 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c89f373-4419-44d3-8339-7c41bb248c63-catalog-content\") pod \"certified-operators-7qqnm\" (UID: \"3c89f373-4419-44d3-8339-7c41bb248c63\") " pod="openshift-marketplace/certified-operators-7qqnm" Nov 28 10:13:11 crc kubenswrapper[4838]: I1128 10:13:11.171858 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdw2l\" (UniqueName: \"kubernetes.io/projected/3c89f373-4419-44d3-8339-7c41bb248c63-kube-api-access-bdw2l\") pod \"certified-operators-7qqnm\" (UID: \"3c89f373-4419-44d3-8339-7c41bb248c63\") " pod="openshift-marketplace/certified-operators-7qqnm" Nov 28 10:13:11 crc kubenswrapper[4838]: I1128 10:13:11.283570 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdw2l\" (UniqueName: \"kubernetes.io/projected/3c89f373-4419-44d3-8339-7c41bb248c63-kube-api-access-bdw2l\") pod \"certified-operators-7qqnm\" (UID: \"3c89f373-4419-44d3-8339-7c41bb248c63\") " pod="openshift-marketplace/certified-operators-7qqnm" Nov 28 10:13:11 crc kubenswrapper[4838]: I1128 10:13:11.283889 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c89f373-4419-44d3-8339-7c41bb248c63-utilities\") pod \"certified-operators-7qqnm\" (UID: \"3c89f373-4419-44d3-8339-7c41bb248c63\") " pod="openshift-marketplace/certified-operators-7qqnm" Nov 28 10:13:11 crc kubenswrapper[4838]: I1128 10:13:11.283935 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c89f373-4419-44d3-8339-7c41bb248c63-catalog-content\") pod \"certified-operators-7qqnm\" (UID: \"3c89f373-4419-44d3-8339-7c41bb248c63\") " pod="openshift-marketplace/certified-operators-7qqnm" Nov 28 10:13:11 crc kubenswrapper[4838]: I1128 10:13:11.284559 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c89f373-4419-44d3-8339-7c41bb248c63-catalog-content\") pod \"certified-operators-7qqnm\" (UID: \"3c89f373-4419-44d3-8339-7c41bb248c63\") " pod="openshift-marketplace/certified-operators-7qqnm" Nov 28 10:13:11 crc kubenswrapper[4838]: I1128 10:13:11.284683 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c89f373-4419-44d3-8339-7c41bb248c63-utilities\") pod \"certified-operators-7qqnm\" (UID: \"3c89f373-4419-44d3-8339-7c41bb248c63\") " pod="openshift-marketplace/certified-operators-7qqnm" Nov 28 10:13:11 crc kubenswrapper[4838]: I1128 10:13:11.303921 4838 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-bdw2l\" (UniqueName: \"kubernetes.io/projected/3c89f373-4419-44d3-8339-7c41bb248c63-kube-api-access-bdw2l\") pod \"certified-operators-7qqnm\" (UID: \"3c89f373-4419-44d3-8339-7c41bb248c63\") " pod="openshift-marketplace/certified-operators-7qqnm" Nov 28 10:13:11 crc kubenswrapper[4838]: I1128 10:13:11.327224 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7qqnm" Nov 28 10:13:11 crc kubenswrapper[4838]: I1128 10:13:11.768541 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7qqnm"] Nov 28 10:13:11 crc kubenswrapper[4838]: W1128 10:13:11.775890 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3c89f373_4419_44d3_8339_7c41bb248c63.slice/crio-7174e7e417a9bc827e3a7d7932f39e359ca4201e51b79a5aa32451d1178815d4 WatchSource:0}: Error finding container 7174e7e417a9bc827e3a7d7932f39e359ca4201e51b79a5aa32451d1178815d4: Status 404 returned error can't find the container with id 7174e7e417a9bc827e3a7d7932f39e359ca4201e51b79a5aa32451d1178815d4 Nov 28 10:13:12 crc kubenswrapper[4838]: I1128 10:13:12.749584 4838 generic.go:334] "Generic (PLEG): container finished" podID="3c89f373-4419-44d3-8339-7c41bb248c63" containerID="9898971db08b95b66f43d3268f71945dd459ff606926c9f64b59a81e476be1ef" exitCode=0 Nov 28 10:13:12 crc kubenswrapper[4838]: I1128 10:13:12.749635 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7qqnm" event={"ID":"3c89f373-4419-44d3-8339-7c41bb248c63","Type":"ContainerDied","Data":"9898971db08b95b66f43d3268f71945dd459ff606926c9f64b59a81e476be1ef"} Nov 28 10:13:12 crc kubenswrapper[4838]: I1128 10:13:12.749903 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7qqnm" event={"ID":"3c89f373-4419-44d3-8339-7c41bb248c63","Type":"ContainerStarted","Data":"7174e7e417a9bc827e3a7d7932f39e359ca4201e51b79a5aa32451d1178815d4"} Nov 28 10:13:13 crc kubenswrapper[4838]: I1128 10:13:13.757379 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7qqnm" event={"ID":"3c89f373-4419-44d3-8339-7c41bb248c63","Type":"ContainerStarted","Data":"4a76466fe70913a3319b76339a4e880b814f2792988b2d44830fb4f7f1f30b31"} Nov 28 10:13:14 crc kubenswrapper[4838]: I1128 10:13:14.768776 4838 generic.go:334] "Generic (PLEG): container finished" podID="3c89f373-4419-44d3-8339-7c41bb248c63" containerID="4a76466fe70913a3319b76339a4e880b814f2792988b2d44830fb4f7f1f30b31" exitCode=0 Nov 28 10:13:14 crc kubenswrapper[4838]: I1128 10:13:14.768854 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7qqnm" event={"ID":"3c89f373-4419-44d3-8339-7c41bb248c63","Type":"ContainerDied","Data":"4a76466fe70913a3319b76339a4e880b814f2792988b2d44830fb4f7f1f30b31"} Nov 28 10:13:15 crc kubenswrapper[4838]: I1128 10:13:15.778828 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7qqnm" event={"ID":"3c89f373-4419-44d3-8339-7c41bb248c63","Type":"ContainerStarted","Data":"ea0b4f3ba5db301ed3353191126a3685b8168833e082d6241d7db9242c5f279c"} Nov 28 10:13:15 crc kubenswrapper[4838]: I1128 10:13:15.808153 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7qqnm" 
Nov 28 10:13:18 crc kubenswrapper[4838]: I1128 10:13:18.394649 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-l927l"]
Nov 28 10:13:18 crc kubenswrapper[4838]: I1128 10:13:18.396297 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l927l"
Nov 28 10:13:18 crc kubenswrapper[4838]: I1128 10:13:18.425546 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l927l"]
Nov 28 10:13:18 crc kubenswrapper[4838]: I1128 10:13:18.484273 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4xmr\" (UniqueName: \"kubernetes.io/projected/c3e77e1e-b7a8-41bd-bd41-251f25659691-kube-api-access-z4xmr\") pod \"redhat-marketplace-l927l\" (UID: \"c3e77e1e-b7a8-41bd-bd41-251f25659691\") " pod="openshift-marketplace/redhat-marketplace-l927l"
Nov 28 10:13:18 crc kubenswrapper[4838]: I1128 10:13:18.484313 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3e77e1e-b7a8-41bd-bd41-251f25659691-catalog-content\") pod \"redhat-marketplace-l927l\" (UID: \"c3e77e1e-b7a8-41bd-bd41-251f25659691\") " pod="openshift-marketplace/redhat-marketplace-l927l"
Nov 28 10:13:18 crc kubenswrapper[4838]: I1128 10:13:18.484361 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3e77e1e-b7a8-41bd-bd41-251f25659691-utilities\") pod \"redhat-marketplace-l927l\" (UID: \"c3e77e1e-b7a8-41bd-bd41-251f25659691\") " pod="openshift-marketplace/redhat-marketplace-l927l"
Nov 28 10:13:18 crc kubenswrapper[4838]: I1128 10:13:18.585940 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4xmr\" (UniqueName: \"kubernetes.io/projected/c3e77e1e-b7a8-41bd-bd41-251f25659691-kube-api-access-z4xmr\") pod \"redhat-marketplace-l927l\" (UID: \"c3e77e1e-b7a8-41bd-bd41-251f25659691\") " pod="openshift-marketplace/redhat-marketplace-l927l"
Nov 28 10:13:18 crc kubenswrapper[4838]: I1128 10:13:18.585997 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3e77e1e-b7a8-41bd-bd41-251f25659691-catalog-content\") pod \"redhat-marketplace-l927l\" (UID: \"c3e77e1e-b7a8-41bd-bd41-251f25659691\") " pod="openshift-marketplace/redhat-marketplace-l927l"
Nov 28 10:13:18 crc kubenswrapper[4838]: I1128 10:13:18.586085 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3e77e1e-b7a8-41bd-bd41-251f25659691-utilities\") pod \"redhat-marketplace-l927l\" (UID: \"c3e77e1e-b7a8-41bd-bd41-251f25659691\") " pod="openshift-marketplace/redhat-marketplace-l927l"
Nov 28 10:13:18 crc kubenswrapper[4838]: I1128 10:13:18.586740 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3e77e1e-b7a8-41bd-bd41-251f25659691-utilities\") pod \"redhat-marketplace-l927l\" (UID: \"c3e77e1e-b7a8-41bd-bd41-251f25659691\") " pod="openshift-marketplace/redhat-marketplace-l927l"
Nov 28 10:13:18 crc kubenswrapper[4838]: I1128 10:13:18.587235 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3e77e1e-b7a8-41bd-bd41-251f25659691-catalog-content\") pod \"redhat-marketplace-l927l\" (UID: \"c3e77e1e-b7a8-41bd-bd41-251f25659691\") " pod="openshift-marketplace/redhat-marketplace-l927l"
Nov 28 10:13:18 crc kubenswrapper[4838]: I1128 10:13:18.621383 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4xmr\" (UniqueName: \"kubernetes.io/projected/c3e77e1e-b7a8-41bd-bd41-251f25659691-kube-api-access-z4xmr\") pod \"redhat-marketplace-l927l\" (UID: \"c3e77e1e-b7a8-41bd-bd41-251f25659691\") " pod="openshift-marketplace/redhat-marketplace-l927l"
Nov 28 10:13:18 crc kubenswrapper[4838]: I1128 10:13:18.735385 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l927l"
Nov 28 10:13:19 crc kubenswrapper[4838]: I1128 10:13:19.200235 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l927l"]
Nov 28 10:13:19 crc kubenswrapper[4838]: I1128 10:13:19.813039 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l927l" event={"ID":"c3e77e1e-b7a8-41bd-bd41-251f25659691","Type":"ContainerStarted","Data":"8fd680d76e0ceb5fd04cf2ff206e63fa64abd1e7fce71f75d361fe6edb63e5af"}
Nov 28 10:13:20 crc kubenswrapper[4838]: I1128 10:13:20.820158 4838 generic.go:334] "Generic (PLEG): container finished" podID="c3e77e1e-b7a8-41bd-bd41-251f25659691" containerID="6e987c1d3ba28fd444c0b39ed6632acc8011000fdbc590658c282827e238e6e5" exitCode=0
Nov 28 10:13:20 crc kubenswrapper[4838]: I1128 10:13:20.820203 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l927l" event={"ID":"c3e77e1e-b7a8-41bd-bd41-251f25659691","Type":"ContainerDied","Data":"6e987c1d3ba28fd444c0b39ed6632acc8011000fdbc590658c282827e238e6e5"}
Nov 28 10:13:21 crc kubenswrapper[4838]: I1128 10:13:21.330121 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7qqnm"
Nov 28 10:13:21 crc kubenswrapper[4838]: I1128 10:13:21.330638 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7qqnm"
Nov 28 10:13:21 crc kubenswrapper[4838]: I1128 10:13:21.408900 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7qqnm"
Nov 28 10:13:21 crc kubenswrapper[4838]: I1128 10:13:21.892633 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7qqnm"
Nov 28 10:13:22 crc kubenswrapper[4838]: I1128 10:13:22.038870 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-5d66f99678-q8k4k"
Nov 28 10:13:22 crc kubenswrapper[4838]: I1128 10:13:22.835380 4838 generic.go:334] "Generic (PLEG): container finished" podID="c3e77e1e-b7a8-41bd-bd41-251f25659691" containerID="5ad366f898628bebb693c14b9868afb3d599a86c77fbf4f5157e5e888e0897c7" exitCode=0
Nov 28 10:13:22 crc kubenswrapper[4838]: I1128 10:13:22.835453 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l927l" event={"ID":"c3e77e1e-b7a8-41bd-bd41-251f25659691","Type":"ContainerDied","Data":"5ad366f898628bebb693c14b9868afb3d599a86c77fbf4f5157e5e888e0897c7"}
Nov 28 10:13:23 crc kubenswrapper[4838]: I1128 10:13:23.801281 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7qqnm"]
Nov 28 10:13:23 crc kubenswrapper[4838]: I1128 10:13:23.844229 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l927l" event={"ID":"c3e77e1e-b7a8-41bd-bd41-251f25659691","Type":"ContainerStarted","Data":"4d7a3d3620a9850f026d1ca497b0aa618fb51e36c3dfff1baadb551529e0bb86"}
Nov 28 10:13:23 crc kubenswrapper[4838]: I1128 10:13:23.844399 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-7qqnm" podUID="3c89f373-4419-44d3-8339-7c41bb248c63" containerName="registry-server" containerID="cri-o://ea0b4f3ba5db301ed3353191126a3685b8168833e082d6241d7db9242c5f279c" gracePeriod=2
Nov 28 10:13:23 crc kubenswrapper[4838]: I1128 10:13:23.872831 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-l927l" podStartSLOduration=3.4611609 podStartE2EDuration="5.87281047s" podCreationTimestamp="2025-11-28 10:13:18 +0000 UTC" firstStartedPulling="2025-11-28 10:13:20.821951217 +0000 UTC m=+972.520925397" lastFinishedPulling="2025-11-28 10:13:23.233600787 +0000 UTC m=+974.932574967" observedRunningTime="2025-11-28 10:13:23.872177393 +0000 UTC m=+975.571151573" watchObservedRunningTime="2025-11-28 10:13:23.87281047 +0000 UTC m=+975.571784650"
Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.233111 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7qqnm"
Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.267204 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c89f373-4419-44d3-8339-7c41bb248c63-utilities\") pod \"3c89f373-4419-44d3-8339-7c41bb248c63\" (UID: \"3c89f373-4419-44d3-8339-7c41bb248c63\") "
Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.267290 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bdw2l\" (UniqueName: \"kubernetes.io/projected/3c89f373-4419-44d3-8339-7c41bb248c63-kube-api-access-bdw2l\") pod \"3c89f373-4419-44d3-8339-7c41bb248c63\" (UID: \"3c89f373-4419-44d3-8339-7c41bb248c63\") "
Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.267346 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c89f373-4419-44d3-8339-7c41bb248c63-catalog-content\") pod \"3c89f373-4419-44d3-8339-7c41bb248c63\" (UID: \"3c89f373-4419-44d3-8339-7c41bb248c63\") "
Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.268461 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c89f373-4419-44d3-8339-7c41bb248c63-utilities" (OuterVolumeSpecName: "utilities") pod "3c89f373-4419-44d3-8339-7c41bb248c63" (UID: "3c89f373-4419-44d3-8339-7c41bb248c63"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.286527 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c89f373-4419-44d3-8339-7c41bb248c63-kube-api-access-bdw2l" (OuterVolumeSpecName: "kube-api-access-bdw2l") pod "3c89f373-4419-44d3-8339-7c41bb248c63" (UID: "3c89f373-4419-44d3-8339-7c41bb248c63"). InnerVolumeSpecName "kube-api-access-bdw2l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.368827 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bdw2l\" (UniqueName: \"kubernetes.io/projected/3c89f373-4419-44d3-8339-7c41bb248c63-kube-api-access-bdw2l\") on node \"crc\" DevicePath \"\""
Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.369176 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c89f373-4419-44d3-8339-7c41bb248c63-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.587891 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c89f373-4419-44d3-8339-7c41bb248c63-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3c89f373-4419-44d3-8339-7c41bb248c63" (UID: "3c89f373-4419-44d3-8339-7c41bb248c63"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.675495 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c89f373-4419-44d3-8339-7c41bb248c63-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.854937 4838 generic.go:334] "Generic (PLEG): container finished" podID="3c89f373-4419-44d3-8339-7c41bb248c63" containerID="ea0b4f3ba5db301ed3353191126a3685b8168833e082d6241d7db9242c5f279c" exitCode=0
Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.855009 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7qqnm" event={"ID":"3c89f373-4419-44d3-8339-7c41bb248c63","Type":"ContainerDied","Data":"ea0b4f3ba5db301ed3353191126a3685b8168833e082d6241d7db9242c5f279c"}
Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.855091 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7qqnm" event={"ID":"3c89f373-4419-44d3-8339-7c41bb248c63","Type":"ContainerDied","Data":"7174e7e417a9bc827e3a7d7932f39e359ca4201e51b79a5aa32451d1178815d4"}
Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.855101 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7qqnm"
Need to start a new one" pod="openshift-marketplace/certified-operators-7qqnm" Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.855148 4838 scope.go:117] "RemoveContainer" containerID="ea0b4f3ba5db301ed3353191126a3685b8168833e082d6241d7db9242c5f279c" Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.878847 4838 scope.go:117] "RemoveContainer" containerID="4a76466fe70913a3319b76339a4e880b814f2792988b2d44830fb4f7f1f30b31" Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.904005 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7qqnm"] Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.910086 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-7qqnm"] Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.912379 4838 scope.go:117] "RemoveContainer" containerID="9898971db08b95b66f43d3268f71945dd459ff606926c9f64b59a81e476be1ef" Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.931448 4838 scope.go:117] "RemoveContainer" containerID="ea0b4f3ba5db301ed3353191126a3685b8168833e082d6241d7db9242c5f279c" Nov 28 10:13:24 crc kubenswrapper[4838]: E1128 10:13:24.931845 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea0b4f3ba5db301ed3353191126a3685b8168833e082d6241d7db9242c5f279c\": container with ID starting with ea0b4f3ba5db301ed3353191126a3685b8168833e082d6241d7db9242c5f279c not found: ID does not exist" containerID="ea0b4f3ba5db301ed3353191126a3685b8168833e082d6241d7db9242c5f279c" Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.931899 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea0b4f3ba5db301ed3353191126a3685b8168833e082d6241d7db9242c5f279c"} err="failed to get container status \"ea0b4f3ba5db301ed3353191126a3685b8168833e082d6241d7db9242c5f279c\": rpc error: code = NotFound desc = could not find container \"ea0b4f3ba5db301ed3353191126a3685b8168833e082d6241d7db9242c5f279c\": container with ID starting with ea0b4f3ba5db301ed3353191126a3685b8168833e082d6241d7db9242c5f279c not found: ID does not exist" Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.931923 4838 scope.go:117] "RemoveContainer" containerID="4a76466fe70913a3319b76339a4e880b814f2792988b2d44830fb4f7f1f30b31" Nov 28 10:13:24 crc kubenswrapper[4838]: E1128 10:13:24.932158 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a76466fe70913a3319b76339a4e880b814f2792988b2d44830fb4f7f1f30b31\": container with ID starting with 4a76466fe70913a3319b76339a4e880b814f2792988b2d44830fb4f7f1f30b31 not found: ID does not exist" containerID="4a76466fe70913a3319b76339a4e880b814f2792988b2d44830fb4f7f1f30b31" Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.932197 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a76466fe70913a3319b76339a4e880b814f2792988b2d44830fb4f7f1f30b31"} err="failed to get container status \"4a76466fe70913a3319b76339a4e880b814f2792988b2d44830fb4f7f1f30b31\": rpc error: code = NotFound desc = could not find container \"4a76466fe70913a3319b76339a4e880b814f2792988b2d44830fb4f7f1f30b31\": container with ID starting with 4a76466fe70913a3319b76339a4e880b814f2792988b2d44830fb4f7f1f30b31 not found: ID does not exist" Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.932221 4838 scope.go:117] "RemoveContainer" 
containerID="9898971db08b95b66f43d3268f71945dd459ff606926c9f64b59a81e476be1ef" Nov 28 10:13:24 crc kubenswrapper[4838]: E1128 10:13:24.932602 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9898971db08b95b66f43d3268f71945dd459ff606926c9f64b59a81e476be1ef\": container with ID starting with 9898971db08b95b66f43d3268f71945dd459ff606926c9f64b59a81e476be1ef not found: ID does not exist" containerID="9898971db08b95b66f43d3268f71945dd459ff606926c9f64b59a81e476be1ef" Nov 28 10:13:24 crc kubenswrapper[4838]: I1128 10:13:24.932650 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9898971db08b95b66f43d3268f71945dd459ff606926c9f64b59a81e476be1ef"} err="failed to get container status \"9898971db08b95b66f43d3268f71945dd459ff606926c9f64b59a81e476be1ef\": rpc error: code = NotFound desc = could not find container \"9898971db08b95b66f43d3268f71945dd459ff606926c9f64b59a81e476be1ef\": container with ID starting with 9898971db08b95b66f43d3268f71945dd459ff606926c9f64b59a81e476be1ef not found: ID does not exist" Nov 28 10:13:26 crc kubenswrapper[4838]: I1128 10:13:26.570535 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c89f373-4419-44d3-8339-7c41bb248c63" path="/var/lib/kubelet/pods/3c89f373-4419-44d3-8339-7c41bb248c63/volumes" Nov 28 10:13:28 crc kubenswrapper[4838]: I1128 10:13:28.735805 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-l927l" Nov 28 10:13:28 crc kubenswrapper[4838]: I1128 10:13:28.736131 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-l927l" Nov 28 10:13:28 crc kubenswrapper[4838]: I1128 10:13:28.783483 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-l927l" Nov 28 10:13:28 crc kubenswrapper[4838]: I1128 10:13:28.953482 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-l927l" Nov 28 10:13:29 crc kubenswrapper[4838]: I1128 10:13:29.786499 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l927l"] Nov 28 10:13:30 crc kubenswrapper[4838]: I1128 10:13:30.891036 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-l927l" podUID="c3e77e1e-b7a8-41bd-bd41-251f25659691" containerName="registry-server" containerID="cri-o://4d7a3d3620a9850f026d1ca497b0aa618fb51e36c3dfff1baadb551529e0bb86" gracePeriod=2 Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.336356 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l927l" Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.359359 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z4xmr\" (UniqueName: \"kubernetes.io/projected/c3e77e1e-b7a8-41bd-bd41-251f25659691-kube-api-access-z4xmr\") pod \"c3e77e1e-b7a8-41bd-bd41-251f25659691\" (UID: \"c3e77e1e-b7a8-41bd-bd41-251f25659691\") " Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.359501 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3e77e1e-b7a8-41bd-bd41-251f25659691-catalog-content\") pod \"c3e77e1e-b7a8-41bd-bd41-251f25659691\" (UID: \"c3e77e1e-b7a8-41bd-bd41-251f25659691\") " Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.359562 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3e77e1e-b7a8-41bd-bd41-251f25659691-utilities\") pod \"c3e77e1e-b7a8-41bd-bd41-251f25659691\" (UID: \"c3e77e1e-b7a8-41bd-bd41-251f25659691\") " Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.360328 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3e77e1e-b7a8-41bd-bd41-251f25659691-utilities" (OuterVolumeSpecName: "utilities") pod "c3e77e1e-b7a8-41bd-bd41-251f25659691" (UID: "c3e77e1e-b7a8-41bd-bd41-251f25659691"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.372910 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3e77e1e-b7a8-41bd-bd41-251f25659691-kube-api-access-z4xmr" (OuterVolumeSpecName: "kube-api-access-z4xmr") pod "c3e77e1e-b7a8-41bd-bd41-251f25659691" (UID: "c3e77e1e-b7a8-41bd-bd41-251f25659691"). InnerVolumeSpecName "kube-api-access-z4xmr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.411273 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3e77e1e-b7a8-41bd-bd41-251f25659691-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c3e77e1e-b7a8-41bd-bd41-251f25659691" (UID: "c3e77e1e-b7a8-41bd-bd41-251f25659691"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.461368 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z4xmr\" (UniqueName: \"kubernetes.io/projected/c3e77e1e-b7a8-41bd-bd41-251f25659691-kube-api-access-z4xmr\") on node \"crc\" DevicePath \"\"" Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.461413 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3e77e1e-b7a8-41bd-bd41-251f25659691-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.461427 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3e77e1e-b7a8-41bd-bd41-251f25659691-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.899257 4838 generic.go:334] "Generic (PLEG): container finished" podID="c3e77e1e-b7a8-41bd-bd41-251f25659691" containerID="4d7a3d3620a9850f026d1ca497b0aa618fb51e36c3dfff1baadb551529e0bb86" exitCode=0 Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.899367 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l927l" Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.900621 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l927l" event={"ID":"c3e77e1e-b7a8-41bd-bd41-251f25659691","Type":"ContainerDied","Data":"4d7a3d3620a9850f026d1ca497b0aa618fb51e36c3dfff1baadb551529e0bb86"} Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.900769 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l927l" event={"ID":"c3e77e1e-b7a8-41bd-bd41-251f25659691","Type":"ContainerDied","Data":"8fd680d76e0ceb5fd04cf2ff206e63fa64abd1e7fce71f75d361fe6edb63e5af"} Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.900858 4838 scope.go:117] "RemoveContainer" containerID="4d7a3d3620a9850f026d1ca497b0aa618fb51e36c3dfff1baadb551529e0bb86" Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.919739 4838 scope.go:117] "RemoveContainer" containerID="5ad366f898628bebb693c14b9868afb3d599a86c77fbf4f5157e5e888e0897c7" Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.932355 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l927l"] Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.944226 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-l927l"] Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.952422 4838 scope.go:117] "RemoveContainer" containerID="6e987c1d3ba28fd444c0b39ed6632acc8011000fdbc590658c282827e238e6e5" Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.966690 4838 scope.go:117] "RemoveContainer" containerID="4d7a3d3620a9850f026d1ca497b0aa618fb51e36c3dfff1baadb551529e0bb86" Nov 28 10:13:31 crc kubenswrapper[4838]: E1128 10:13:31.967111 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d7a3d3620a9850f026d1ca497b0aa618fb51e36c3dfff1baadb551529e0bb86\": container with ID starting with 4d7a3d3620a9850f026d1ca497b0aa618fb51e36c3dfff1baadb551529e0bb86 not found: ID does not exist" containerID="4d7a3d3620a9850f026d1ca497b0aa618fb51e36c3dfff1baadb551529e0bb86" Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.967155 4838 
Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.967181 4838 scope.go:117] "RemoveContainer" containerID="5ad366f898628bebb693c14b9868afb3d599a86c77fbf4f5157e5e888e0897c7"
Nov 28 10:13:31 crc kubenswrapper[4838]: E1128 10:13:31.967542 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ad366f898628bebb693c14b9868afb3d599a86c77fbf4f5157e5e888e0897c7\": container with ID starting with 5ad366f898628bebb693c14b9868afb3d599a86c77fbf4f5157e5e888e0897c7 not found: ID does not exist" containerID="5ad366f898628bebb693c14b9868afb3d599a86c77fbf4f5157e5e888e0897c7"
Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.967654 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ad366f898628bebb693c14b9868afb3d599a86c77fbf4f5157e5e888e0897c7"} err="failed to get container status \"5ad366f898628bebb693c14b9868afb3d599a86c77fbf4f5157e5e888e0897c7\": rpc error: code = NotFound desc = could not find container \"5ad366f898628bebb693c14b9868afb3d599a86c77fbf4f5157e5e888e0897c7\": container with ID starting with 5ad366f898628bebb693c14b9868afb3d599a86c77fbf4f5157e5e888e0897c7 not found: ID does not exist"
Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.967743 4838 scope.go:117] "RemoveContainer" containerID="6e987c1d3ba28fd444c0b39ed6632acc8011000fdbc590658c282827e238e6e5"
Nov 28 10:13:31 crc kubenswrapper[4838]: E1128 10:13:31.968129 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e987c1d3ba28fd444c0b39ed6632acc8011000fdbc590658c282827e238e6e5\": container with ID starting with 6e987c1d3ba28fd444c0b39ed6632acc8011000fdbc590658c282827e238e6e5 not found: ID does not exist" containerID="6e987c1d3ba28fd444c0b39ed6632acc8011000fdbc590658c282827e238e6e5"
Nov 28 10:13:31 crc kubenswrapper[4838]: I1128 10:13:31.968155 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e987c1d3ba28fd444c0b39ed6632acc8011000fdbc590658c282827e238e6e5"} err="failed to get container status \"6e987c1d3ba28fd444c0b39ed6632acc8011000fdbc590658c282827e238e6e5\": rpc error: code = NotFound desc = could not find container \"6e987c1d3ba28fd444c0b39ed6632acc8011000fdbc590658c282827e238e6e5\": container with ID starting with 6e987c1d3ba28fd444c0b39ed6632acc8011000fdbc590658c282827e238e6e5 not found: ID does not exist"
Nov 28 10:13:32 crc kubenswrapper[4838]: I1128 10:13:32.569902 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3e77e1e-b7a8-41bd-bd41-251f25659691" path="/var/lib/kubelet/pods/c3e77e1e-b7a8-41bd-bd41-251f25659691/volumes"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.235565 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-z8m7s"]
Nov 28 10:13:40 crc kubenswrapper[4838]: E1128 10:13:40.236202 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c89f373-4419-44d3-8339-7c41bb248c63" containerName="extract-utilities"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.236213 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c89f373-4419-44d3-8339-7c41bb248c63" containerName="extract-utilities"
Nov 28 10:13:40 crc kubenswrapper[4838]: E1128 10:13:40.236223 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c89f373-4419-44d3-8339-7c41bb248c63" containerName="registry-server"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.236229 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c89f373-4419-44d3-8339-7c41bb248c63" containerName="registry-server"
Nov 28 10:13:40 crc kubenswrapper[4838]: E1128 10:13:40.236239 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3e77e1e-b7a8-41bd-bd41-251f25659691" containerName="extract-utilities"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.236245 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3e77e1e-b7a8-41bd-bd41-251f25659691" containerName="extract-utilities"
Nov 28 10:13:40 crc kubenswrapper[4838]: E1128 10:13:40.236256 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3e77e1e-b7a8-41bd-bd41-251f25659691" containerName="registry-server"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.236261 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3e77e1e-b7a8-41bd-bd41-251f25659691" containerName="registry-server"
Nov 28 10:13:40 crc kubenswrapper[4838]: E1128 10:13:40.236273 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3e77e1e-b7a8-41bd-bd41-251f25659691" containerName="extract-content"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.236279 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3e77e1e-b7a8-41bd-bd41-251f25659691" containerName="extract-content"
Nov 28 10:13:40 crc kubenswrapper[4838]: E1128 10:13:40.236287 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c89f373-4419-44d3-8339-7c41bb248c63" containerName="extract-content"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.236292 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c89f373-4419-44d3-8339-7c41bb248c63" containerName="extract-content"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.236387 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3e77e1e-b7a8-41bd-bd41-251f25659691" containerName="registry-server"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.236406 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c89f373-4419-44d3-8339-7c41bb248c63" containerName="registry-server"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.237013 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-z8m7s"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.244903 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-zsv5q"]
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.246221 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-zsv5q"
Nov 28 10:13:40 crc kubenswrapper[4838]: W1128 10:13:40.248595 4838 reflector.go:561] object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-d96vd": failed to list *v1.Secret: secrets "cinder-operator-controller-manager-dockercfg-d96vd" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openstack-operators": no relationship found between node 'crc' and this object
Nov 28 10:13:40 crc kubenswrapper[4838]: E1128 10:13:40.248631 4838 reflector.go:158] "Unhandled Error" err="object-\"openstack-operators\"/\"cinder-operator-controller-manager-dockercfg-d96vd\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"cinder-operator-controller-manager-dockercfg-d96vd\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openstack-operators\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.249099 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-4t986"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.261298 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-jtm69"]
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.262286 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-jtm69"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.263763 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-gt5lk"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.266748 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-sgt4v"]
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.267938 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-sgt4v"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.270419 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-qvnwn"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.272118 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rp75c\" (UniqueName: \"kubernetes.io/projected/aceca209-5955-4644-a139-2dfc5d36bb48-kube-api-access-rp75c\") pod \"designate-operator-controller-manager-955677c94-jtm69\" (UID: \"aceca209-5955-4644-a139-2dfc5d36bb48\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-jtm69"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.272172 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztqr5\" (UniqueName: \"kubernetes.io/projected/458f4354-42e8-46d1-a571-0a0d1a852574-kube-api-access-ztqr5\") pod \"cinder-operator-controller-manager-6b7f75547b-z8m7s\" (UID: \"458f4354-42e8-46d1-a571-0a0d1a852574\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-z8m7s"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.272214 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9b8lv\" (UniqueName: \"kubernetes.io/projected/2d42b4ea-468b-482a-8d06-57d2cd7d40f0-kube-api-access-9b8lv\") pod \"barbican-operator-controller-manager-7b64f4fb85-zsv5q\" (UID: \"2d42b4ea-468b-482a-8d06-57d2cd7d40f0\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-zsv5q"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.274652 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-zsv5q"]
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.281895 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-jtm69"]
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.292855 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-mfvxw"]
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.293831 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-mfvxw"
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-mfvxw" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.306076 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-zzt5b" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.318816 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-mfvxw"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.324345 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-sgt4v"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.334673 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-z8m7s"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.346524 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-99pnl"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.347514 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-99pnl" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.350619 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-sf6fv" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.351124 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-7qwxs"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.352200 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-7qwxs" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.356437 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.356678 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-lwn5n" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.357493 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dldwc"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.358313 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dldwc" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.359401 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-99pnl"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.362284 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-54k9d" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.363642 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-7qwxs"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.373692 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rp75c\" (UniqueName: \"kubernetes.io/projected/aceca209-5955-4644-a139-2dfc5d36bb48-kube-api-access-rp75c\") pod \"designate-operator-controller-manager-955677c94-jtm69\" (UID: \"aceca209-5955-4644-a139-2dfc5d36bb48\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-jtm69" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.373761 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fs2bp\" (UniqueName: \"kubernetes.io/projected/00ae7528-ac6c-4ceb-9e8f-80e588aced3d-kube-api-access-fs2bp\") pod \"ironic-operator-controller-manager-67cb4dc6d4-dldwc\" (UID: \"00ae7528-ac6c-4ceb-9e8f-80e588aced3d\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dldwc" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.373779 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rcqcv\" (UniqueName: \"kubernetes.io/projected/bfc8796f-9498-4707-ae79-225de0c3d39f-kube-api-access-rcqcv\") pod \"heat-operator-controller-manager-5b77f656f-mfvxw\" (UID: \"bfc8796f-9498-4707-ae79-225de0c3d39f\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-mfvxw" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.373802 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztqr5\" (UniqueName: \"kubernetes.io/projected/458f4354-42e8-46d1-a571-0a0d1a852574-kube-api-access-ztqr5\") pod \"cinder-operator-controller-manager-6b7f75547b-z8m7s\" (UID: \"458f4354-42e8-46d1-a571-0a0d1a852574\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-z8m7s" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.373823 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8kmfv\" (UniqueName: \"kubernetes.io/projected/f79a2f03-43b3-47d0-89f8-55374a730a22-kube-api-access-8kmfv\") pod \"horizon-operator-controller-manager-5d494799bf-99pnl\" (UID: \"f79a2f03-43b3-47d0-89f8-55374a730a22\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-99pnl" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.373848 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9b8lv\" (UniqueName: \"kubernetes.io/projected/2d42b4ea-468b-482a-8d06-57d2cd7d40f0-kube-api-access-9b8lv\") pod \"barbican-operator-controller-manager-7b64f4fb85-zsv5q\" (UID: \"2d42b4ea-468b-482a-8d06-57d2cd7d40f0\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-zsv5q" Nov 28 10:13:40 crc 
kubenswrapper[4838]: I1128 10:13:40.373875 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vxnh\" (UniqueName: \"kubernetes.io/projected/20706334-3560-47c0-beee-0eacda6e2eeb-kube-api-access-2vxnh\") pod \"glance-operator-controller-manager-589cbd6b5b-sgt4v\" (UID: \"20706334-3560-47c0-beee-0eacda6e2eeb\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-sgt4v" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.373892 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmdh9\" (UniqueName: \"kubernetes.io/projected/64a7b90b-6294-429b-b7f8-7820d9a5514e-kube-api-access-zmdh9\") pod \"infra-operator-controller-manager-57548d458d-7qwxs\" (UID: \"64a7b90b-6294-429b-b7f8-7820d9a5514e\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-7qwxs" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.373920 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/64a7b90b-6294-429b-b7f8-7820d9a5514e-cert\") pod \"infra-operator-controller-manager-57548d458d-7qwxs\" (UID: \"64a7b90b-6294-429b-b7f8-7820d9a5514e\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-7qwxs" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.382105 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dldwc"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.403489 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-qt6hp"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.409520 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rp75c\" (UniqueName: \"kubernetes.io/projected/aceca209-5955-4644-a139-2dfc5d36bb48-kube-api-access-rp75c\") pod \"designate-operator-controller-manager-955677c94-jtm69\" (UID: \"aceca209-5955-4644-a139-2dfc5d36bb48\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-jtm69" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.410493 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztqr5\" (UniqueName: \"kubernetes.io/projected/458f4354-42e8-46d1-a571-0a0d1a852574-kube-api-access-ztqr5\") pod \"cinder-operator-controller-manager-6b7f75547b-z8m7s\" (UID: \"458f4354-42e8-46d1-a571-0a0d1a852574\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-z8m7s" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.414248 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9b8lv\" (UniqueName: \"kubernetes.io/projected/2d42b4ea-468b-482a-8d06-57d2cd7d40f0-kube-api-access-9b8lv\") pod \"barbican-operator-controller-manager-7b64f4fb85-zsv5q\" (UID: \"2d42b4ea-468b-482a-8d06-57d2cd7d40f0\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-zsv5q" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.423658 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-qt6hp" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.428011 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-s542h" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.436778 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-qt6hp"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.455774 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lppm4"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.456909 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lppm4" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.459924 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-zstrg" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.467574 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-67764766d7-5dcgh"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.468746 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-67764766d7-5dcgh" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.475558 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmdh9\" (UniqueName: \"kubernetes.io/projected/64a7b90b-6294-429b-b7f8-7820d9a5514e-kube-api-access-zmdh9\") pod \"infra-operator-controller-manager-57548d458d-7qwxs\" (UID: \"64a7b90b-6294-429b-b7f8-7820d9a5514e\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-7qwxs" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.475594 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vxnh\" (UniqueName: \"kubernetes.io/projected/20706334-3560-47c0-beee-0eacda6e2eeb-kube-api-access-2vxnh\") pod \"glance-operator-controller-manager-589cbd6b5b-sgt4v\" (UID: \"20706334-3560-47c0-beee-0eacda6e2eeb\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-sgt4v" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.475628 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/64a7b90b-6294-429b-b7f8-7820d9a5514e-cert\") pod \"infra-operator-controller-manager-57548d458d-7qwxs\" (UID: \"64a7b90b-6294-429b-b7f8-7820d9a5514e\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-7qwxs" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.475682 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9njbw\" (UniqueName: \"kubernetes.io/projected/f9825746-5143-4716-9458-aad44231b721-kube-api-access-9njbw\") pod \"keystone-operator-controller-manager-7b4567c7cf-qt6hp\" (UID: \"f9825746-5143-4716-9458-aad44231b721\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-qt6hp" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.475713 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fs2bp\" (UniqueName: 
\"kubernetes.io/projected/00ae7528-ac6c-4ceb-9e8f-80e588aced3d-kube-api-access-fs2bp\") pod \"ironic-operator-controller-manager-67cb4dc6d4-dldwc\" (UID: \"00ae7528-ac6c-4ceb-9e8f-80e588aced3d\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dldwc" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.475748 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rcqcv\" (UniqueName: \"kubernetes.io/projected/bfc8796f-9498-4707-ae79-225de0c3d39f-kube-api-access-rcqcv\") pod \"heat-operator-controller-manager-5b77f656f-mfvxw\" (UID: \"bfc8796f-9498-4707-ae79-225de0c3d39f\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-mfvxw" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.475771 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8kmfv\" (UniqueName: \"kubernetes.io/projected/f79a2f03-43b3-47d0-89f8-55374a730a22-kube-api-access-8kmfv\") pod \"horizon-operator-controller-manager-5d494799bf-99pnl\" (UID: \"f79a2f03-43b3-47d0-89f8-55374a730a22\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-99pnl" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.475788 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srwxm\" (UniqueName: \"kubernetes.io/projected/aef566ed-5e8f-4ce9-9fa4-75bfef26a65e-kube-api-access-srwxm\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-lppm4\" (UID: \"aef566ed-5e8f-4ce9-9fa4-75bfef26a65e\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lppm4" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.475806 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8g9g7\" (UniqueName: \"kubernetes.io/projected/d8596d98-979f-4b13-bef4-ccaabbcf155e-kube-api-access-8g9g7\") pod \"manila-operator-controller-manager-67764766d7-5dcgh\" (UID: \"d8596d98-979f-4b13-bef4-ccaabbcf155e\") " pod="openstack-operators/manila-operator-controller-manager-67764766d7-5dcgh" Nov 28 10:13:40 crc kubenswrapper[4838]: E1128 10:13:40.476287 4838 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 10:13:40 crc kubenswrapper[4838]: E1128 10:13:40.476336 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64a7b90b-6294-429b-b7f8-7820d9a5514e-cert podName:64a7b90b-6294-429b-b7f8-7820d9a5514e nodeName:}" failed. No retries permitted until 2025-11-28 10:13:40.976321232 +0000 UTC m=+992.675295402 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/64a7b90b-6294-429b-b7f8-7820d9a5514e-cert") pod "infra-operator-controller-manager-57548d458d-7qwxs" (UID: "64a7b90b-6294-429b-b7f8-7820d9a5514e") : secret "infra-operator-webhook-server-cert" not found Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.477266 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-kmfmt" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.480962 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lppm4"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.489348 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-67764766d7-5dcgh"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.499227 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fs2bp\" (UniqueName: \"kubernetes.io/projected/00ae7528-ac6c-4ceb-9e8f-80e588aced3d-kube-api-access-fs2bp\") pod \"ironic-operator-controller-manager-67cb4dc6d4-dldwc\" (UID: \"00ae7528-ac6c-4ceb-9e8f-80e588aced3d\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dldwc" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.509348 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8kmfv\" (UniqueName: \"kubernetes.io/projected/f79a2f03-43b3-47d0-89f8-55374a730a22-kube-api-access-8kmfv\") pod \"horizon-operator-controller-manager-5d494799bf-99pnl\" (UID: \"f79a2f03-43b3-47d0-89f8-55374a730a22\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-99pnl" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.513675 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-nw7wx"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.514678 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-nw7wx" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.530752 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rcqcv\" (UniqueName: \"kubernetes.io/projected/bfc8796f-9498-4707-ae79-225de0c3d39f-kube-api-access-rcqcv\") pod \"heat-operator-controller-manager-5b77f656f-mfvxw\" (UID: \"bfc8796f-9498-4707-ae79-225de0c3d39f\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-mfvxw" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.534320 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-nw7wx"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.534652 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-xw58r" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.540831 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmdh9\" (UniqueName: \"kubernetes.io/projected/64a7b90b-6294-429b-b7f8-7820d9a5514e-kube-api-access-zmdh9\") pod \"infra-operator-controller-manager-57548d458d-7qwxs\" (UID: \"64a7b90b-6294-429b-b7f8-7820d9a5514e\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-7qwxs" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.547647 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vxnh\" (UniqueName: \"kubernetes.io/projected/20706334-3560-47c0-beee-0eacda6e2eeb-kube-api-access-2vxnh\") pod \"glance-operator-controller-manager-589cbd6b5b-sgt4v\" (UID: \"20706334-3560-47c0-beee-0eacda6e2eeb\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-sgt4v" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.547744 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-zrl6r"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.548739 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-zrl6r" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.551002 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-nqnlh" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.587831 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-zsv5q" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.594613 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-jtm69" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.599397 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-zrl6r"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.599595 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-tjbnw"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.611256 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwr7f\" (UniqueName: \"kubernetes.io/projected/90e6e6d2-fd36-40e9-9002-d3a5e4c53f4e-kube-api-access-wwr7f\") pod \"nova-operator-controller-manager-79556f57fc-zrl6r\" (UID: \"90e6e6d2-fd36-40e9-9002-d3a5e4c53f4e\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-zrl6r" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.611304 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srwxm\" (UniqueName: \"kubernetes.io/projected/aef566ed-5e8f-4ce9-9fa4-75bfef26a65e-kube-api-access-srwxm\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-lppm4\" (UID: \"aef566ed-5e8f-4ce9-9fa4-75bfef26a65e\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lppm4" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.611326 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8g9g7\" (UniqueName: \"kubernetes.io/projected/d8596d98-979f-4b13-bef4-ccaabbcf155e-kube-api-access-8g9g7\") pod \"manila-operator-controller-manager-67764766d7-5dcgh\" (UID: \"d8596d98-979f-4b13-bef4-ccaabbcf155e\") " pod="openstack-operators/manila-operator-controller-manager-67764766d7-5dcgh" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.611887 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5646f\" (UniqueName: \"kubernetes.io/projected/03bb7fb2-31ae-4e18-b77d-e6dad8007460-kube-api-access-5646f\") pod \"neutron-operator-controller-manager-6fdcddb789-nw7wx\" (UID: \"03bb7fb2-31ae-4e18-b77d-e6dad8007460\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-nw7wx" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.611963 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9njbw\" (UniqueName: \"kubernetes.io/projected/f9825746-5143-4716-9458-aad44231b721-kube-api-access-9njbw\") pod \"keystone-operator-controller-manager-7b4567c7cf-qt6hp\" (UID: \"f9825746-5143-4716-9458-aad44231b721\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-qt6hp" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.612031 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-tjbnw" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.613518 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-sgt4v" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.614133 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-ddfr2" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.627071 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-tjbnw"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.627474 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-mfvxw" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.638594 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srwxm\" (UniqueName: \"kubernetes.io/projected/aef566ed-5e8f-4ce9-9fa4-75bfef26a65e-kube-api-access-srwxm\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-lppm4\" (UID: \"aef566ed-5e8f-4ce9-9fa4-75bfef26a65e\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lppm4" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.638698 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9njbw\" (UniqueName: \"kubernetes.io/projected/f9825746-5143-4716-9458-aad44231b721-kube-api-access-9njbw\") pod \"keystone-operator-controller-manager-7b4567c7cf-qt6hp\" (UID: \"f9825746-5143-4716-9458-aad44231b721\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-qt6hp" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.644259 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.645577 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.648323 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-fqx4k" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.650299 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8g9g7\" (UniqueName: \"kubernetes.io/projected/d8596d98-979f-4b13-bef4-ccaabbcf155e-kube-api-access-8g9g7\") pod \"manila-operator-controller-manager-67764766d7-5dcgh\" (UID: \"d8596d98-979f-4b13-bef4-ccaabbcf155e\") " pod="openstack-operators/manila-operator-controller-manager-67764766d7-5dcgh" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.653344 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.657894 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-jk9v2"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.659280 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jk9v2" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.660558 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-8p4rc" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.669154 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-99pnl" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.676873 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-jk9v2"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.688063 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.696356 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-2mxtr"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.702145 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dldwc" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.707369 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2mxtr" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.712988 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-7l4fz" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.713167 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zghm2\" (UniqueName: \"kubernetes.io/projected/dd6ab766-6c66-4d3e-8089-9fe2faf6a28a-kube-api-access-zghm2\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6\" (UID: \"dd6ab766-6c66-4d3e-8089-9fe2faf6a28a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.713252 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-845jk\" (UniqueName: \"kubernetes.io/projected/eb245c1e-92f6-486e-be63-0093a22ed7b0-kube-api-access-845jk\") pod \"ovn-operator-controller-manager-56897c768d-jk9v2\" (UID: \"eb245c1e-92f6-486e-be63-0093a22ed7b0\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jk9v2" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.713297 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5646f\" (UniqueName: \"kubernetes.io/projected/03bb7fb2-31ae-4e18-b77d-e6dad8007460-kube-api-access-5646f\") pod \"neutron-operator-controller-manager-6fdcddb789-nw7wx\" (UID: \"03bb7fb2-31ae-4e18-b77d-e6dad8007460\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-nw7wx" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.713324 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dd6ab766-6c66-4d3e-8089-9fe2faf6a28a-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6\" (UID: 
\"dd6ab766-6c66-4d3e-8089-9fe2faf6a28a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.713362 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xv74h\" (UniqueName: \"kubernetes.io/projected/40466f6c-03c1-4aa4-9d03-947168f4068c-kube-api-access-xv74h\") pod \"octavia-operator-controller-manager-64cdc6ff96-tjbnw\" (UID: \"40466f6c-03c1-4aa4-9d03-947168f4068c\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-tjbnw" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.713382 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwr7f\" (UniqueName: \"kubernetes.io/projected/90e6e6d2-fd36-40e9-9002-d3a5e4c53f4e-kube-api-access-wwr7f\") pod \"nova-operator-controller-manager-79556f57fc-zrl6r\" (UID: \"90e6e6d2-fd36-40e9-9002-d3a5e4c53f4e\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-zrl6r" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.723760 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-t8tj5"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.724857 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-t8tj5" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.727467 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-c5vjf" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.739153 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwr7f\" (UniqueName: \"kubernetes.io/projected/90e6e6d2-fd36-40e9-9002-d3a5e4c53f4e-kube-api-access-wwr7f\") pod \"nova-operator-controller-manager-79556f57fc-zrl6r\" (UID: \"90e6e6d2-fd36-40e9-9002-d3a5e4c53f4e\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-zrl6r" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.742577 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5646f\" (UniqueName: \"kubernetes.io/projected/03bb7fb2-31ae-4e18-b77d-e6dad8007460-kube-api-access-5646f\") pod \"neutron-operator-controller-manager-6fdcddb789-nw7wx\" (UID: \"03bb7fb2-31ae-4e18-b77d-e6dad8007460\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-nw7wx" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.746652 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-2mxtr"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.754102 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-t8tj5"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.771047 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-qt6hp" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.773217 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-2xswp"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.774450 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-2xswp" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.778076 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-tzczr" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.794072 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-2xswp"] Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.796575 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lppm4" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.814223 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dd6ab766-6c66-4d3e-8089-9fe2faf6a28a-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6\" (UID: \"dd6ab766-6c66-4d3e-8089-9fe2faf6a28a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.814282 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xv74h\" (UniqueName: \"kubernetes.io/projected/40466f6c-03c1-4aa4-9d03-947168f4068c-kube-api-access-xv74h\") pod \"octavia-operator-controller-manager-64cdc6ff96-tjbnw\" (UID: \"40466f6c-03c1-4aa4-9d03-947168f4068c\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-tjbnw" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.814313 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhssf\" (UniqueName: \"kubernetes.io/projected/fb9e8fa1-8798-424a-a435-daae465a8e79-kube-api-access-zhssf\") pod \"swift-operator-controller-manager-d77b94747-2mxtr\" (UID: \"fb9e8fa1-8798-424a-a435-daae465a8e79\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-2mxtr" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.814334 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdmhr\" (UniqueName: \"kubernetes.io/projected/f74a2c89-ae8d-428e-b8b2-d2d58e943f8e-kube-api-access-wdmhr\") pod \"placement-operator-controller-manager-57988cc5b5-t8tj5\" (UID: \"f74a2c89-ae8d-428e-b8b2-d2d58e943f8e\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-t8tj5" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.814365 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zghm2\" (UniqueName: \"kubernetes.io/projected/dd6ab766-6c66-4d3e-8089-9fe2faf6a28a-kube-api-access-zghm2\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6\" (UID: \"dd6ab766-6c66-4d3e-8089-9fe2faf6a28a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6" Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.814429 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-845jk\" (UniqueName: \"kubernetes.io/projected/eb245c1e-92f6-486e-be63-0093a22ed7b0-kube-api-access-845jk\") pod \"ovn-operator-controller-manager-56897c768d-jk9v2\" (UID: \"eb245c1e-92f6-486e-be63-0093a22ed7b0\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jk9v2" Nov 28 10:13:40 crc 
Nov 28 10:13:40 crc kubenswrapper[4838]: E1128 10:13:40.815116 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dd6ab766-6c66-4d3e-8089-9fe2faf6a28a-cert podName:dd6ab766-6c66-4d3e-8089-9fe2faf6a28a nodeName:}" failed. No retries permitted until 2025-11-28 10:13:41.31510255 +0000 UTC m=+993.014076720 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/dd6ab766-6c66-4d3e-8089-9fe2faf6a28a-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6" (UID: "dd6ab766-6c66-4d3e-8089-9fe2faf6a28a") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.832305 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zghm2\" (UniqueName: \"kubernetes.io/projected/dd6ab766-6c66-4d3e-8089-9fe2faf6a28a-kube-api-access-zghm2\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6\" (UID: \"dd6ab766-6c66-4d3e-8089-9fe2faf6a28a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.834660 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-845jk\" (UniqueName: \"kubernetes.io/projected/eb245c1e-92f6-486e-be63-0093a22ed7b0-kube-api-access-845jk\") pod \"ovn-operator-controller-manager-56897c768d-jk9v2\" (UID: \"eb245c1e-92f6-486e-be63-0093a22ed7b0\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jk9v2"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.835869 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jrtsw"]
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.846939 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jrtsw"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.847575 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xv74h\" (UniqueName: \"kubernetes.io/projected/40466f6c-03c1-4aa4-9d03-947168f4068c-kube-api-access-xv74h\") pod \"octavia-operator-controller-manager-64cdc6ff96-tjbnw\" (UID: \"40466f6c-03c1-4aa4-9d03-947168f4068c\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-tjbnw"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.854698 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-55l8z"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.859340 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jrtsw"]
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.882873 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-67764766d7-5dcgh"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.893450 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-pqwfn"]
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.894772 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-pqwfn"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.898163 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-l6tdw"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.898256 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-nw7wx"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.904241 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-pqwfn"]
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.916109 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mtpsq\" (UniqueName: \"kubernetes.io/projected/5f3be357-f971-4dd1-bb7e-82098aaad7b4-kube-api-access-mtpsq\") pod \"telemetry-operator-controller-manager-76cc84c6bb-2xswp\" (UID: \"5f3be357-f971-4dd1-bb7e-82098aaad7b4\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-2xswp"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.916172 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msbhf\" (UniqueName: \"kubernetes.io/projected/7b19f93b-ae7d-4e10-acce-53f0c65bbce0-kube-api-access-msbhf\") pod \"test-operator-controller-manager-5cd6c7f4c8-jrtsw\" (UID: \"7b19f93b-ae7d-4e10-acce-53f0c65bbce0\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jrtsw"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.916195 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhssf\" (UniqueName: \"kubernetes.io/projected/fb9e8fa1-8798-424a-a435-daae465a8e79-kube-api-access-zhssf\") pod \"swift-operator-controller-manager-d77b94747-2mxtr\" (UID: \"fb9e8fa1-8798-424a-a435-daae465a8e79\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-2mxtr"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.916217 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdmhr\" (UniqueName: \"kubernetes.io/projected/f74a2c89-ae8d-428e-b8b2-d2d58e943f8e-kube-api-access-wdmhr\") pod \"placement-operator-controller-manager-57988cc5b5-t8tj5\" (UID: \"f74a2c89-ae8d-428e-b8b2-d2d58e943f8e\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-t8tj5"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.923784 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-zrl6r"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.936659 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhssf\" (UniqueName: \"kubernetes.io/projected/fb9e8fa1-8798-424a-a435-daae465a8e79-kube-api-access-zhssf\") pod \"swift-operator-controller-manager-d77b94747-2mxtr\" (UID: \"fb9e8fa1-8798-424a-a435-daae465a8e79\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-2mxtr"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.944303 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdmhr\" (UniqueName: \"kubernetes.io/projected/f74a2c89-ae8d-428e-b8b2-d2d58e943f8e-kube-api-access-wdmhr\") pod \"placement-operator-controller-manager-57988cc5b5-t8tj5\" (UID: \"f74a2c89-ae8d-428e-b8b2-d2d58e943f8e\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-t8tj5"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.972286 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97"]
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.973504 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.974795 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-tjbnw"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.984413 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.984634 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.993150 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-gjhr7"
Nov 28 10:13:40 crc kubenswrapper[4838]: I1128 10:13:40.994683 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97"]
Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.006619 4838 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jk9v2" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.024953 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/64a7b90b-6294-429b-b7f8-7820d9a5514e-cert\") pod \"infra-operator-controller-manager-57548d458d-7qwxs\" (UID: \"64a7b90b-6294-429b-b7f8-7820d9a5514e\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-7qwxs" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.025064 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mtpsq\" (UniqueName: \"kubernetes.io/projected/5f3be357-f971-4dd1-bb7e-82098aaad7b4-kube-api-access-mtpsq\") pod \"telemetry-operator-controller-manager-76cc84c6bb-2xswp\" (UID: \"5f3be357-f971-4dd1-bb7e-82098aaad7b4\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-2xswp" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.025124 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-metrics-certs\") pod \"openstack-operator-controller-manager-8599fd59b5-m2r97\" (UID: \"397a8728-e2ae-4db0-8446-9044007df4e1\") " pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.025195 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msbhf\" (UniqueName: \"kubernetes.io/projected/7b19f93b-ae7d-4e10-acce-53f0c65bbce0-kube-api-access-msbhf\") pod \"test-operator-controller-manager-5cd6c7f4c8-jrtsw\" (UID: \"7b19f93b-ae7d-4e10-acce-53f0c65bbce0\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jrtsw" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.025233 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-webhook-certs\") pod \"openstack-operator-controller-manager-8599fd59b5-m2r97\" (UID: \"397a8728-e2ae-4db0-8446-9044007df4e1\") " pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.025317 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjf6c\" (UniqueName: \"kubernetes.io/projected/397a8728-e2ae-4db0-8446-9044007df4e1-kube-api-access-hjf6c\") pod \"openstack-operator-controller-manager-8599fd59b5-m2r97\" (UID: \"397a8728-e2ae-4db0-8446-9044007df4e1\") " pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.025339 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dvvg\" (UniqueName: \"kubernetes.io/projected/431c4d08-781a-4925-96cd-153997f72239-kube-api-access-2dvvg\") pod \"watcher-operator-controller-manager-656dcb59d4-pqwfn\" (UID: \"431c4d08-781a-4925-96cd-153997f72239\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-pqwfn" Nov 28 10:13:41 crc kubenswrapper[4838]: E1128 10:13:41.025463 4838 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 
10:13:41 crc kubenswrapper[4838]: E1128 10:13:41.025509 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64a7b90b-6294-429b-b7f8-7820d9a5514e-cert podName:64a7b90b-6294-429b-b7f8-7820d9a5514e nodeName:}" failed. No retries permitted until 2025-11-28 10:13:42.025494039 +0000 UTC m=+993.724468199 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/64a7b90b-6294-429b-b7f8-7820d9a5514e-cert") pod "infra-operator-controller-manager-57548d458d-7qwxs" (UID: "64a7b90b-6294-429b-b7f8-7820d9a5514e") : secret "infra-operator-webhook-server-cert" not found Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.035608 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2mxtr" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.056535 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-t8tj5" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.061257 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-jtm69"] Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.064029 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mtpsq\" (UniqueName: \"kubernetes.io/projected/5f3be357-f971-4dd1-bb7e-82098aaad7b4-kube-api-access-mtpsq\") pod \"telemetry-operator-controller-manager-76cc84c6bb-2xswp\" (UID: \"5f3be357-f971-4dd1-bb7e-82098aaad7b4\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-2xswp" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.064437 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msbhf\" (UniqueName: \"kubernetes.io/projected/7b19f93b-ae7d-4e10-acce-53f0c65bbce0-kube-api-access-msbhf\") pod \"test-operator-controller-manager-5cd6c7f4c8-jrtsw\" (UID: \"7b19f93b-ae7d-4e10-acce-53f0c65bbce0\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jrtsw" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.082374 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xj7cz"] Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.083439 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xj7cz" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.085459 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-x68pq" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.087766 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-2xswp" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.121375 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xj7cz"] Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.127950 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-metrics-certs\") pod \"openstack-operator-controller-manager-8599fd59b5-m2r97\" (UID: \"397a8728-e2ae-4db0-8446-9044007df4e1\") " pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.127995 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-webhook-certs\") pod \"openstack-operator-controller-manager-8599fd59b5-m2r97\" (UID: \"397a8728-e2ae-4db0-8446-9044007df4e1\") " pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.128040 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjf6c\" (UniqueName: \"kubernetes.io/projected/397a8728-e2ae-4db0-8446-9044007df4e1-kube-api-access-hjf6c\") pod \"openstack-operator-controller-manager-8599fd59b5-m2r97\" (UID: \"397a8728-e2ae-4db0-8446-9044007df4e1\") " pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.128059 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dvvg\" (UniqueName: \"kubernetes.io/projected/431c4d08-781a-4925-96cd-153997f72239-kube-api-access-2dvvg\") pod \"watcher-operator-controller-manager-656dcb59d4-pqwfn\" (UID: \"431c4d08-781a-4925-96cd-153997f72239\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-pqwfn" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.128083 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ww78\" (UniqueName: \"kubernetes.io/projected/6979c02b-5bc1-4eec-aa05-086f449ffd93-kube-api-access-2ww78\") pod \"rabbitmq-cluster-operator-manager-668c99d594-xj7cz\" (UID: \"6979c02b-5bc1-4eec-aa05-086f449ffd93\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xj7cz" Nov 28 10:13:41 crc kubenswrapper[4838]: E1128 10:13:41.128207 4838 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 10:13:41 crc kubenswrapper[4838]: E1128 10:13:41.130404 4838 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 10:13:41 crc kubenswrapper[4838]: E1128 10:13:41.145130 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-metrics-certs podName:397a8728-e2ae-4db0-8446-9044007df4e1 nodeName:}" failed. No retries permitted until 2025-11-28 10:13:41.645103992 +0000 UTC m=+993.344078162 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-metrics-certs") pod "openstack-operator-controller-manager-8599fd59b5-m2r97" (UID: "397a8728-e2ae-4db0-8446-9044007df4e1") : secret "metrics-server-cert" not found Nov 28 10:13:41 crc kubenswrapper[4838]: E1128 10:13:41.145209 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-webhook-certs podName:397a8728-e2ae-4db0-8446-9044007df4e1 nodeName:}" failed. No retries permitted until 2025-11-28 10:13:41.645186244 +0000 UTC m=+993.344160404 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-webhook-certs") pod "openstack-operator-controller-manager-8599fd59b5-m2r97" (UID: "397a8728-e2ae-4db0-8446-9044007df4e1") : secret "webhook-server-cert" not found Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.150128 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjf6c\" (UniqueName: \"kubernetes.io/projected/397a8728-e2ae-4db0-8446-9044007df4e1-kube-api-access-hjf6c\") pod \"openstack-operator-controller-manager-8599fd59b5-m2r97\" (UID: \"397a8728-e2ae-4db0-8446-9044007df4e1\") " pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.156216 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dvvg\" (UniqueName: \"kubernetes.io/projected/431c4d08-781a-4925-96cd-153997f72239-kube-api-access-2dvvg\") pod \"watcher-operator-controller-manager-656dcb59d4-pqwfn\" (UID: \"431c4d08-781a-4925-96cd-153997f72239\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-pqwfn" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.158328 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-zsv5q"] Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.179854 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jrtsw" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.229474 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ww78\" (UniqueName: \"kubernetes.io/projected/6979c02b-5bc1-4eec-aa05-086f449ffd93-kube-api-access-2ww78\") pod \"rabbitmq-cluster-operator-manager-668c99d594-xj7cz\" (UID: \"6979c02b-5bc1-4eec-aa05-086f449ffd93\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xj7cz" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.243177 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-pqwfn" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.245820 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ww78\" (UniqueName: \"kubernetes.io/projected/6979c02b-5bc1-4eec-aa05-086f449ffd93-kube-api-access-2ww78\") pod \"rabbitmq-cluster-operator-manager-668c99d594-xj7cz\" (UID: \"6979c02b-5bc1-4eec-aa05-086f449ffd93\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xj7cz" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.330325 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dd6ab766-6c66-4d3e-8089-9fe2faf6a28a-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6\" (UID: \"dd6ab766-6c66-4d3e-8089-9fe2faf6a28a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6" Nov 28 10:13:41 crc kubenswrapper[4838]: E1128 10:13:41.330503 4838 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 10:13:41 crc kubenswrapper[4838]: E1128 10:13:41.330552 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dd6ab766-6c66-4d3e-8089-9fe2faf6a28a-cert podName:dd6ab766-6c66-4d3e-8089-9fe2faf6a28a nodeName:}" failed. No retries permitted until 2025-11-28 10:13:42.330535928 +0000 UTC m=+994.029510098 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/dd6ab766-6c66-4d3e-8089-9fe2faf6a28a-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6" (UID: "dd6ab766-6c66-4d3e-8089-9fe2faf6a28a") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.399371 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-99pnl"] Nov 28 10:13:41 crc kubenswrapper[4838]: W1128 10:13:41.400148 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf79a2f03_43b3_47d0_89f8_55374a730a22.slice/crio-450b1258c2a10fbda0412ed04f7ad7b0cd9a3999180587bda08e7edeb32d1493 WatchSource:0}: Error finding container 450b1258c2a10fbda0412ed04f7ad7b0cd9a3999180587bda08e7edeb32d1493: Status 404 returned error can't find the container with id 450b1258c2a10fbda0412ed04f7ad7b0cd9a3999180587bda08e7edeb32d1493 Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.418739 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-sgt4v"] Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.429757 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xj7cz" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.432237 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dldwc"] Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.572181 4838 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-z8m7s" secret="" err="failed to sync secret cache: timed out waiting for the condition" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.572271 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-z8m7s" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.609851 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-qt6hp"] Nov 28 10:13:41 crc kubenswrapper[4838]: W1128 10:13:41.622922 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf9825746_5143_4716_9458_aad44231b721.slice/crio-f852a3a5bb5a65643c445439dac5a0acc1b5f737da7ae2c2127adaec9240d54d WatchSource:0}: Error finding container f852a3a5bb5a65643c445439dac5a0acc1b5f737da7ae2c2127adaec9240d54d: Status 404 returned error can't find the container with id f852a3a5bb5a65643c445439dac5a0acc1b5f737da7ae2c2127adaec9240d54d Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.632511 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lppm4"] Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.662161 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-d96vd" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.664945 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-mfvxw"] Nov 28 10:13:41 crc kubenswrapper[4838]: W1128 10:13:41.677337 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbfc8796f_9498_4707_ae79_225de0c3d39f.slice/crio-8071b682dc6f29d3411ca22fca811f5e2c87bab12ddef31bab10d63e5ddbba87 WatchSource:0}: Error finding container 8071b682dc6f29d3411ca22fca811f5e2c87bab12ddef31bab10d63e5ddbba87: Status 404 returned error can't find the container with id 8071b682dc6f29d3411ca22fca811f5e2c87bab12ddef31bab10d63e5ddbba87 Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.734752 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-metrics-certs\") pod \"openstack-operator-controller-manager-8599fd59b5-m2r97\" (UID: \"397a8728-e2ae-4db0-8446-9044007df4e1\") " pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.734803 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-webhook-certs\") pod \"openstack-operator-controller-manager-8599fd59b5-m2r97\" (UID: \"397a8728-e2ae-4db0-8446-9044007df4e1\") " pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97" Nov 28 10:13:41 crc kubenswrapper[4838]: E1128 10:13:41.734910 4838 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 10:13:41 crc kubenswrapper[4838]: E1128 10:13:41.734960 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-webhook-certs 
podName:397a8728-e2ae-4db0-8446-9044007df4e1 nodeName:}" failed. No retries permitted until 2025-11-28 10:13:42.734947045 +0000 UTC m=+994.433921215 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-webhook-certs") pod "openstack-operator-controller-manager-8599fd59b5-m2r97" (UID: "397a8728-e2ae-4db0-8446-9044007df4e1") : secret "webhook-server-cert" not found Nov 28 10:13:41 crc kubenswrapper[4838]: E1128 10:13:41.734972 4838 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 10:13:41 crc kubenswrapper[4838]: E1128 10:13:41.735055 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-metrics-certs podName:397a8728-e2ae-4db0-8446-9044007df4e1 nodeName:}" failed. No retries permitted until 2025-11-28 10:13:42.735021686 +0000 UTC m=+994.433995857 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-metrics-certs") pod "openstack-operator-controller-manager-8599fd59b5-m2r97" (UID: "397a8728-e2ae-4db0-8446-9044007df4e1") : secret "metrics-server-cert" not found Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.811642 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-nw7wx"] Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.816020 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-jk9v2"] Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.819935 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-67764766d7-5dcgh"] Nov 28 10:13:41 crc kubenswrapper[4838]: W1128 10:13:41.822096 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeb245c1e_92f6_486e_be63_0093a22ed7b0.slice/crio-eade434e42a0e9f4030ea69ab6cc0f249e6aed485aa3d14ae8a13f9d5fc74766 WatchSource:0}: Error finding container eade434e42a0e9f4030ea69ab6cc0f249e6aed485aa3d14ae8a13f9d5fc74766: Status 404 returned error can't find the container with id eade434e42a0e9f4030ea69ab6cc0f249e6aed485aa3d14ae8a13f9d5fc74766 Nov 28 10:13:41 crc kubenswrapper[4838]: W1128 10:13:41.824892 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd8596d98_979f_4b13_bef4_ccaabbcf155e.slice/crio-7e8a47cbe42336b15e1701cc3607ee40802a7c23061ca2d9f4d59f0943f632e2 WatchSource:0}: Error finding container 7e8a47cbe42336b15e1701cc3607ee40802a7c23061ca2d9f4d59f0943f632e2: Status 404 returned error can't find the container with id 7e8a47cbe42336b15e1701cc3607ee40802a7c23061ca2d9f4d59f0943f632e2 Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.900962 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-tjbnw"] Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.909038 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-2mxtr"] Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.913967 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jrtsw"] Nov 28 10:13:41 crc kubenswrapper[4838]: W1128 10:13:41.919624 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7b19f93b_ae7d_4e10_acce_53f0c65bbce0.slice/crio-e0d3304a8b49b6b34beeee661ee08f117aa60d71b1ab2a205f4fd5ed568ab48b WatchSource:0}: Error finding container e0d3304a8b49b6b34beeee661ee08f117aa60d71b1ab2a205f4fd5ed568ab48b: Status 404 returned error can't find the container with id e0d3304a8b49b6b34beeee661ee08f117aa60d71b1ab2a205f4fd5ed568ab48b Nov 28 10:13:41 crc kubenswrapper[4838]: W1128 10:13:41.932345 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb9e8fa1_8798_424a_a435_daae465a8e79.slice/crio-25349a69bf361207d7c89192502ec29ccafa6887fd5f9ee20ebd6ef9d2b15551 WatchSource:0}: Error finding container 25349a69bf361207d7c89192502ec29ccafa6887fd5f9ee20ebd6ef9d2b15551: Status 404 returned error can't find the container with id 25349a69bf361207d7c89192502ec29ccafa6887fd5f9ee20ebd6ef9d2b15551 Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.938530 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-zrl6r"] Nov 28 10:13:41 crc kubenswrapper[4838]: E1128 10:13:41.946075 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:225958f250a1075b69439d776a13acc45c78695c21abda23600fb53ca1640423,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wdmhr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-57988cc5b5-t8tj5_openstack-operators(f74a2c89-ae8d-428e-b8b2-d2d58e943f8e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.947947 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-t8tj5"] Nov 28 10:13:41 crc kubenswrapper[4838]: E1128 10:13:41.954821 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wdmhr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-57988cc5b5-t8tj5_openstack-operators(f74a2c89-ae8d-428e-b8b2-d2d58e943f8e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 10:13:41 crc kubenswrapper[4838]: E1128 10:13:41.955970 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-t8tj5" podUID="f74a2c89-ae8d-428e-b8b2-d2d58e943f8e" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.958806 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-z8m7s"] Nov 28 10:13:41 crc 
kubenswrapper[4838]: E1128 10:13:41.963559 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wwr7f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-79556f57fc-zrl6r_openstack-operators(90e6e6d2-fd36-40e9-9002-d3a5e4c53f4e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 10:13:41 crc kubenswrapper[4838]: E1128 10:13:41.965358 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wwr7f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-79556f57fc-zrl6r_openstack-operators(90e6e6d2-fd36-40e9-9002-d3a5e4c53f4e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.965389 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-zsv5q" event={"ID":"2d42b4ea-468b-482a-8d06-57d2cd7d40f0","Type":"ContainerStarted","Data":"39d5d9011d4cb1100f3e8c5a5121ce949992000e2dcf89564821d1163c304d97"} Nov 28 10:13:41 crc kubenswrapper[4838]: E1128 10:13:41.966700 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-zrl6r" podUID="90e6e6d2-fd36-40e9-9002-d3a5e4c53f4e" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.967077 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jk9v2" event={"ID":"eb245c1e-92f6-486e-be63-0093a22ed7b0","Type":"ContainerStarted","Data":"eade434e42a0e9f4030ea69ab6cc0f249e6aed485aa3d14ae8a13f9d5fc74766"} Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.969114 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lppm4" event={"ID":"aef566ed-5e8f-4ce9-9fa4-75bfef26a65e","Type":"ContainerStarted","Data":"1e3f9a4c45891fa93c81f731cf60950c84538e9852455cfdddd4e2b14668fee8"} Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.970104 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2mxtr" event={"ID":"fb9e8fa1-8798-424a-a435-daae465a8e79","Type":"ContainerStarted","Data":"25349a69bf361207d7c89192502ec29ccafa6887fd5f9ee20ebd6ef9d2b15551"} Nov 28 10:13:41 crc kubenswrapper[4838]: W1128 10:13:41.972910 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod458f4354_42e8_46d1_a571_0a0d1a852574.slice/crio-dd6023b068210f72be7c9f7c4192fb1a2646220934eae76bb7516469e517e953 WatchSource:0}: Error finding container dd6023b068210f72be7c9f7c4192fb1a2646220934eae76bb7516469e517e953: Status 404 returned error can't find the container with id dd6023b068210f72be7c9f7c4192fb1a2646220934eae76bb7516469e517e953 Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.973318 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-99pnl" event={"ID":"f79a2f03-43b3-47d0-89f8-55374a730a22","Type":"ContainerStarted","Data":"450b1258c2a10fbda0412ed04f7ad7b0cd9a3999180587bda08e7edeb32d1493"} Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.974928 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-jtm69" event={"ID":"aceca209-5955-4644-a139-2dfc5d36bb48","Type":"ContainerStarted","Data":"20ca4ee654f50a155a0a75e71ac135f77484fe747aa237524baac973694669a9"} Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.976751 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-67764766d7-5dcgh" event={"ID":"d8596d98-979f-4b13-bef4-ccaabbcf155e","Type":"ContainerStarted","Data":"7e8a47cbe42336b15e1701cc3607ee40802a7c23061ca2d9f4d59f0943f632e2"} Nov 28 10:13:41 crc kubenswrapper[4838]: E1128 10:13:41.977489 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/cinder-operator@sha256:ca332e48d07f932e470177e48dba9332848a1d14c857cff6f9bfb1adc1998482,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ztqr5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-6b7f75547b-z8m7s_openstack-operators(458f4354-42e8-46d1-a571-0a0d1a852574): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.979439 4838 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-tjbnw" event={"ID":"40466f6c-03c1-4aa4-9d03-947168f4068c","Type":"ContainerStarted","Data":"685a7344249770e0e49160ef3dc575106af124e4e0aef530b9d4e926847f5ee2"} Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.980915 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-mfvxw" event={"ID":"bfc8796f-9498-4707-ae79-225de0c3d39f","Type":"ContainerStarted","Data":"8071b682dc6f29d3411ca22fca811f5e2c87bab12ddef31bab10d63e5ddbba87"} Nov 28 10:13:41 crc kubenswrapper[4838]: E1128 10:13:41.981750 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ztqr5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-6b7f75547b-z8m7s_openstack-operators(458f4354-42e8-46d1-a571-0a0d1a852574): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.982402 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-t8tj5" event={"ID":"f74a2c89-ae8d-428e-b8b2-d2d58e943f8e","Type":"ContainerStarted","Data":"ef732d7378204515413473f63600d6a207b429d8f2f456b3c70a63ad3ed462db"} Nov 28 10:13:41 crc kubenswrapper[4838]: E1128 10:13:41.983373 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-z8m7s" podUID="458f4354-42e8-46d1-a571-0a0d1a852574" Nov 28 10:13:41 crc kubenswrapper[4838]: E1128 10:13:41.985830 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:225958f250a1075b69439d776a13acc45c78695c21abda23600fb53ca1640423\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with 
ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-t8tj5" podUID="f74a2c89-ae8d-428e-b8b2-d2d58e943f8e" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.986949 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dldwc" event={"ID":"00ae7528-ac6c-4ceb-9e8f-80e588aced3d","Type":"ContainerStarted","Data":"e5b141df823bef35e6b405f04f50aacfec8c6678c9411aced9adb1d2b3f66623"} Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.988666 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-zrl6r" event={"ID":"90e6e6d2-fd36-40e9-9002-d3a5e4c53f4e","Type":"ContainerStarted","Data":"d293c74fed2d210cc79de25cdde6c2762f4435fea52e95396bc0aa7d371d6379"} Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.990552 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-qt6hp" event={"ID":"f9825746-5143-4716-9458-aad44231b721","Type":"ContainerStarted","Data":"f852a3a5bb5a65643c445439dac5a0acc1b5f737da7ae2c2127adaec9240d54d"} Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.991964 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-nw7wx" event={"ID":"03bb7fb2-31ae-4e18-b77d-e6dad8007460","Type":"ContainerStarted","Data":"188435e2e354eee5a4f34249d4a91fbc050e1c2bb501d9cc11e30cc915845e51"} Nov 28 10:13:41 crc kubenswrapper[4838]: E1128 10:13:41.992478 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-zrl6r" podUID="90e6e6d2-fd36-40e9-9002-d3a5e4c53f4e" Nov 28 10:13:41 crc kubenswrapper[4838]: I1128 10:13:41.993796 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-sgt4v" event={"ID":"20706334-3560-47c0-beee-0eacda6e2eeb","Type":"ContainerStarted","Data":"ea10c5283c151b44b3bef953a2e1c169fce324fbf447083150417d7fd8e2411d"} Nov 28 10:13:42 crc kubenswrapper[4838]: I1128 10:13:42.000034 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jrtsw" event={"ID":"7b19f93b-ae7d-4e10-acce-53f0c65bbce0","Type":"ContainerStarted","Data":"e0d3304a8b49b6b34beeee661ee08f117aa60d71b1ab2a205f4fd5ed568ab48b"} Nov 28 10:13:42 crc kubenswrapper[4838]: I1128 10:13:42.041098 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/64a7b90b-6294-429b-b7f8-7820d9a5514e-cert\") pod \"infra-operator-controller-manager-57548d458d-7qwxs\" (UID: \"64a7b90b-6294-429b-b7f8-7820d9a5514e\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-7qwxs" Nov 28 10:13:42 crc kubenswrapper[4838]: E1128 10:13:42.041269 4838 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret 
"infra-operator-webhook-server-cert" not found Nov 28 10:13:42 crc kubenswrapper[4838]: E1128 10:13:42.041312 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64a7b90b-6294-429b-b7f8-7820d9a5514e-cert podName:64a7b90b-6294-429b-b7f8-7820d9a5514e nodeName:}" failed. No retries permitted until 2025-11-28 10:13:44.04129978 +0000 UTC m=+995.740273950 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/64a7b90b-6294-429b-b7f8-7820d9a5514e-cert") pod "infra-operator-controller-manager-57548d458d-7qwxs" (UID: "64a7b90b-6294-429b-b7f8-7820d9a5514e") : secret "infra-operator-webhook-server-cert" not found Nov 28 10:13:42 crc kubenswrapper[4838]: I1128 10:13:42.054236 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xj7cz"] Nov 28 10:13:42 crc kubenswrapper[4838]: I1128 10:13:42.064803 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-2xswp"] Nov 28 10:13:42 crc kubenswrapper[4838]: E1128 10:13:42.067475 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mtpsq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
telemetry-operator-controller-manager-76cc84c6bb-2xswp_openstack-operators(5f3be357-f971-4dd1-bb7e-82098aaad7b4): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 10:13:42 crc kubenswrapper[4838]: I1128 10:13:42.069382 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-pqwfn"]
Nov 28 10:13:42 crc kubenswrapper[4838]: E1128 10:13:42.071412 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mtpsq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-2xswp_openstack-operators(5f3be357-f971-4dd1-bb7e-82098aaad7b4): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 10:13:42 crc kubenswrapper[4838]: E1128 10:13:42.073081 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-2xswp" podUID="5f3be357-f971-4dd1-bb7e-82098aaad7b4"
Nov 28 10:13:42 crc kubenswrapper[4838]: E1128 10:13:42.079177 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2ww78,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-xj7cz_openstack-operators(6979c02b-5bc1-4eec-aa05-086f449ffd93): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 10:13:42 crc kubenswrapper[4838]: E1128 10:13:42.081771 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xj7cz" podUID="6979c02b-5bc1-4eec-aa05-086f449ffd93"
Nov 28 10:13:42 crc kubenswrapper[4838]: E1128 10:13:42.083172 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2dvvg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-656dcb59d4-pqwfn_openstack-operators(431c4d08-781a-4925-96cd-153997f72239): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 10:13:42 crc kubenswrapper[4838]: E1128 10:13:42.085159 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2dvvg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-656dcb59d4-pqwfn_openstack-operators(431c4d08-781a-4925-96cd-153997f72239): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 10:13:42 crc kubenswrapper[4838]: E1128 10:13:42.086348 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-pqwfn" podUID="431c4d08-781a-4925-96cd-153997f72239"
Nov 28 10:13:42 crc kubenswrapper[4838]: I1128 10:13:42.346503 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dd6ab766-6c66-4d3e-8089-9fe2faf6a28a-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6\" (UID: \"dd6ab766-6c66-4d3e-8089-9fe2faf6a28a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6"
Nov 28 10:13:42 crc kubenswrapper[4838]: E1128 10:13:42.346672 4838 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 28 10:13:42 crc kubenswrapper[4838]: E1128 10:13:42.346755 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dd6ab766-6c66-4d3e-8089-9fe2faf6a28a-cert podName:dd6ab766-6c66-4d3e-8089-9fe2faf6a28a nodeName:}" failed. No retries permitted until 2025-11-28 10:13:44.346735849 +0000 UTC m=+996.045710019 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/dd6ab766-6c66-4d3e-8089-9fe2faf6a28a-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6" (UID: "dd6ab766-6c66-4d3e-8089-9fe2faf6a28a") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 28 10:13:42 crc kubenswrapper[4838]: I1128 10:13:42.757405 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-metrics-certs\") pod \"openstack-operator-controller-manager-8599fd59b5-m2r97\" (UID: \"397a8728-e2ae-4db0-8446-9044007df4e1\") " pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97"
Nov 28 10:13:42 crc kubenswrapper[4838]: I1128 10:13:42.757801 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-webhook-certs\") pod \"openstack-operator-controller-manager-8599fd59b5-m2r97\" (UID: \"397a8728-e2ae-4db0-8446-9044007df4e1\") " pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97"
Nov 28 10:13:42 crc kubenswrapper[4838]: E1128 10:13:42.757586 4838 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 28 10:13:42 crc kubenswrapper[4838]: E1128 10:13:42.757929 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-metrics-certs podName:397a8728-e2ae-4db0-8446-9044007df4e1 nodeName:}" failed. No retries permitted until 2025-11-28 10:13:44.757910878 +0000 UTC m=+996.456885048 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-metrics-certs") pod "openstack-operator-controller-manager-8599fd59b5-m2r97" (UID: "397a8728-e2ae-4db0-8446-9044007df4e1") : secret "metrics-server-cert" not found
Nov 28 10:13:42 crc kubenswrapper[4838]: E1128 10:13:42.757964 4838 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 28 10:13:42 crc kubenswrapper[4838]: E1128 10:13:42.758020 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-webhook-certs podName:397a8728-e2ae-4db0-8446-9044007df4e1 nodeName:}" failed. No retries permitted until 2025-11-28 10:13:44.758004931 +0000 UTC m=+996.456979101 (durationBeforeRetry 2s).
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-webhook-certs") pod "openstack-operator-controller-manager-8599fd59b5-m2r97" (UID: "397a8728-e2ae-4db0-8446-9044007df4e1") : secret "webhook-server-cert" not found
Nov 28 10:13:43 crc kubenswrapper[4838]: I1128 10:13:43.009508 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-2xswp" event={"ID":"5f3be357-f971-4dd1-bb7e-82098aaad7b4","Type":"ContainerStarted","Data":"a30485138cfb8c9b2726ab91776570e5e23c364734bd464c5aff61d44d51e521"}
Nov 28 10:13:43 crc kubenswrapper[4838]: I1128 10:13:43.012790 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-pqwfn" event={"ID":"431c4d08-781a-4925-96cd-153997f72239","Type":"ContainerStarted","Data":"312e0ae60afc7fb9787b617dda61dbf0db1832a424754ac90a030ec5141657f0"}
Nov 28 10:13:43 crc kubenswrapper[4838]: E1128 10:13:43.013236 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-2xswp" podUID="5f3be357-f971-4dd1-bb7e-82098aaad7b4"
Nov 28 10:13:43 crc kubenswrapper[4838]: E1128 10:13:43.019760 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-pqwfn" podUID="431c4d08-781a-4925-96cd-153997f72239"
Nov 28 10:13:43 crc kubenswrapper[4838]: I1128 10:13:43.024780 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-z8m7s" event={"ID":"458f4354-42e8-46d1-a571-0a0d1a852574","Type":"ContainerStarted","Data":"dd6023b068210f72be7c9f7c4192fb1a2646220934eae76bb7516469e517e953"}
Nov 28 10:13:43 crc kubenswrapper[4838]: E1128 10:13:43.027888 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/cinder-operator@sha256:ca332e48d07f932e470177e48dba9332848a1d14c857cff6f9bfb1adc1998482\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-z8m7s" podUID="458f4354-42e8-46d1-a571-0a0d1a852574"
Nov 28 10:13:43 crc kubenswrapper[4838]: I1128 10:13:43.030644 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xj7cz" event={"ID":"6979c02b-5bc1-4eec-aa05-086f449ffd93","Type":"ContainerStarted","Data":"2536f135a60a6425e056022501ea7825c0fd196cc17cb0df4869e77a50877707"}
Nov 28 10:13:43 crc kubenswrapper[4838]: E1128 10:13:43.033663 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-zrl6r" podUID="90e6e6d2-fd36-40e9-9002-d3a5e4c53f4e"
Nov 28 10:13:43 crc kubenswrapper[4838]: E1128 10:13:43.033730 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xj7cz" podUID="6979c02b-5bc1-4eec-aa05-086f449ffd93"
Nov 28 10:13:43 crc kubenswrapper[4838]: E1128 10:13:43.038412 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:225958f250a1075b69439d776a13acc45c78695c21abda23600fb53ca1640423\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-t8tj5" podUID="f74a2c89-ae8d-428e-b8b2-d2d58e943f8e"
Nov 28 10:13:44 crc kubenswrapper[4838]: E1128 10:13:44.060110 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xj7cz" podUID="6979c02b-5bc1-4eec-aa05-086f449ffd93"
Nov 28 10:13:44 crc kubenswrapper[4838]: E1128 10:13:44.060668 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-pqwfn" podUID="431c4d08-781a-4925-96cd-153997f72239"
Nov 28 10:13:44 crc kubenswrapper[4838]: E1128 10:13:44.060911 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/cinder-operator@sha256:ca332e48d07f932e470177e48dba9332848a1d14c857cff6f9bfb1adc1998482\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-z8m7s" podUID="458f4354-42e8-46d1-a571-0a0d1a852574"
Nov 28 10:13:44 crc kubenswrapper[4838]: E1128 10:13:44.060974 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-2xswp" podUID="5f3be357-f971-4dd1-bb7e-82098aaad7b4"
Nov 28 10:13:44 crc kubenswrapper[4838]: I1128 10:13:44.088256 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/64a7b90b-6294-429b-b7f8-7820d9a5514e-cert\") pod \"infra-operator-controller-manager-57548d458d-7qwxs\" (UID: \"64a7b90b-6294-429b-b7f8-7820d9a5514e\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-7qwxs"
Nov 28 10:13:44 crc kubenswrapper[4838]: E1128 10:13:44.088434 4838 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Nov 28 10:13:44 crc kubenswrapper[4838]: E1128 10:13:44.088489 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64a7b90b-6294-429b-b7f8-7820d9a5514e-cert podName:64a7b90b-6294-429b-b7f8-7820d9a5514e nodeName:}" failed. No retries permitted until 2025-11-28 10:13:48.08847274 +0000 UTC m=+999.787446910 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/64a7b90b-6294-429b-b7f8-7820d9a5514e-cert") pod "infra-operator-controller-manager-57548d458d-7qwxs" (UID: "64a7b90b-6294-429b-b7f8-7820d9a5514e") : secret "infra-operator-webhook-server-cert" not found
Nov 28 10:13:44 crc kubenswrapper[4838]: I1128 10:13:44.396479 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dd6ab766-6c66-4d3e-8089-9fe2faf6a28a-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6\" (UID: \"dd6ab766-6c66-4d3e-8089-9fe2faf6a28a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6"
Nov 28 10:13:44 crc kubenswrapper[4838]: E1128 10:13:44.396888 4838 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 28 10:13:44 crc kubenswrapper[4838]: E1128 10:13:44.396998 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dd6ab766-6c66-4d3e-8089-9fe2faf6a28a-cert podName:dd6ab766-6c66-4d3e-8089-9fe2faf6a28a nodeName:}" failed. No retries permitted until 2025-11-28 10:13:48.396930051 +0000 UTC m=+1000.095904221 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/dd6ab766-6c66-4d3e-8089-9fe2faf6a28a-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6" (UID: "dd6ab766-6c66-4d3e-8089-9fe2faf6a28a") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 28 10:13:44 crc kubenswrapper[4838]: I1128 10:13:44.800675 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-webhook-certs\") pod \"openstack-operator-controller-manager-8599fd59b5-m2r97\" (UID: \"397a8728-e2ae-4db0-8446-9044007df4e1\") " pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97"
Nov 28 10:13:44 crc kubenswrapper[4838]: E1128 10:13:44.800814 4838 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 28 10:13:44 crc kubenswrapper[4838]: E1128 10:13:44.800944 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-webhook-certs podName:397a8728-e2ae-4db0-8446-9044007df4e1 nodeName:}" failed. No retries permitted until 2025-11-28 10:13:48.800930256 +0000 UTC m=+1000.499904426 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-webhook-certs") pod "openstack-operator-controller-manager-8599fd59b5-m2r97" (UID: "397a8728-e2ae-4db0-8446-9044007df4e1") : secret "webhook-server-cert" not found
Nov 28 10:13:44 crc kubenswrapper[4838]: I1128 10:13:44.801302 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-metrics-certs\") pod \"openstack-operator-controller-manager-8599fd59b5-m2r97\" (UID: \"397a8728-e2ae-4db0-8446-9044007df4e1\") " pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97"
Nov 28 10:13:44 crc kubenswrapper[4838]: E1128 10:13:44.801396 4838 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 28 10:13:44 crc kubenswrapper[4838]: E1128 10:13:44.801425 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-metrics-certs podName:397a8728-e2ae-4db0-8446-9044007df4e1 nodeName:}" failed. No retries permitted until 2025-11-28 10:13:48.801417989 +0000 UTC m=+1000.500392159 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-metrics-certs") pod "openstack-operator-controller-manager-8599fd59b5-m2r97" (UID: "397a8728-e2ae-4db0-8446-9044007df4e1") : secret "metrics-server-cert" not found
Nov 28 10:13:48 crc kubenswrapper[4838]: I1128 10:13:48.152942 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/64a7b90b-6294-429b-b7f8-7820d9a5514e-cert\") pod \"infra-operator-controller-manager-57548d458d-7qwxs\" (UID: \"64a7b90b-6294-429b-b7f8-7820d9a5514e\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-7qwxs"
Nov 28 10:13:48 crc kubenswrapper[4838]: E1128 10:13:48.153151 4838 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Nov 28 10:13:48 crc kubenswrapper[4838]: E1128 10:13:48.153242 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64a7b90b-6294-429b-b7f8-7820d9a5514e-cert podName:64a7b90b-6294-429b-b7f8-7820d9a5514e nodeName:}" failed. No retries permitted until 2025-11-28 10:13:56.153218532 +0000 UTC m=+1007.852192722 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/64a7b90b-6294-429b-b7f8-7820d9a5514e-cert") pod "infra-operator-controller-manager-57548d458d-7qwxs" (UID: "64a7b90b-6294-429b-b7f8-7820d9a5514e") : secret "infra-operator-webhook-server-cert" not found
Nov 28 10:13:48 crc kubenswrapper[4838]: I1128 10:13:48.457426 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dd6ab766-6c66-4d3e-8089-9fe2faf6a28a-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6\" (UID: \"dd6ab766-6c66-4d3e-8089-9fe2faf6a28a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6"
Nov 28 10:13:48 crc kubenswrapper[4838]: E1128 10:13:48.457610 4838 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 28 10:13:48 crc kubenswrapper[4838]: E1128 10:13:48.457713 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dd6ab766-6c66-4d3e-8089-9fe2faf6a28a-cert podName:dd6ab766-6c66-4d3e-8089-9fe2faf6a28a nodeName:}" failed. No retries permitted until 2025-11-28 10:13:56.457687225 +0000 UTC m=+1008.156661395 (durationBeforeRetry 8s).
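[annotation] Note the durationBeforeRetry progression across the MountVolume failures above: the same secret mounts are retried after 2s, then 4s, then 8s, so the kubelet's volume manager doubles the delay after each consecutive failure (up to a cap) rather than retrying at a fixed rate while the certificate secrets do not exist yet. A small Go sketch of that doubling schedule; the two-minute cap is an assumed illustrative value, not taken from this log:

package main

import (
	"fmt"
	"time"
)

// nextDelay doubles the retry delay after each consecutive failure, clamped
// to a maximum, mirroring the 2s -> 4s -> 8s progression seen in the log.
func nextDelay(current, max time.Duration) time.Duration {
	if current <= 0 {
		return 2 * time.Second // first retry delay observed above
	}
	if doubled := 2 * current; doubled < max {
		return doubled
	}
	return max
}

func main() {
	var delay time.Duration
	for attempt := 1; attempt <= 8; attempt++ {
		delay = nextDelay(delay, 2*time.Minute)
		fmt.Printf("attempt %d: retry in %s\n", attempt, delay)
	}
}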
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/dd6ab766-6c66-4d3e-8089-9fe2faf6a28a-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6" (UID: "dd6ab766-6c66-4d3e-8089-9fe2faf6a28a") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 28 10:13:48 crc kubenswrapper[4838]: I1128 10:13:48.874317 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-metrics-certs\") pod \"openstack-operator-controller-manager-8599fd59b5-m2r97\" (UID: \"397a8728-e2ae-4db0-8446-9044007df4e1\") " pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97"
Nov 28 10:13:48 crc kubenswrapper[4838]: I1128 10:13:48.874879 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-webhook-certs\") pod \"openstack-operator-controller-manager-8599fd59b5-m2r97\" (UID: \"397a8728-e2ae-4db0-8446-9044007df4e1\") " pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97"
Nov 28 10:13:48 crc kubenswrapper[4838]: E1128 10:13:48.875297 4838 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 28 10:13:48 crc kubenswrapper[4838]: E1128 10:13:48.875408 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-webhook-certs podName:397a8728-e2ae-4db0-8446-9044007df4e1 nodeName:}" failed. No retries permitted until 2025-11-28 10:13:56.875382519 +0000 UTC m=+1008.574356689 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-webhook-certs") pod "openstack-operator-controller-manager-8599fd59b5-m2r97" (UID: "397a8728-e2ae-4db0-8446-9044007df4e1") : secret "webhook-server-cert" not found
Nov 28 10:13:48 crc kubenswrapper[4838]: E1128 10:13:48.876149 4838 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 28 10:13:48 crc kubenswrapper[4838]: E1128 10:13:48.876256 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-metrics-certs podName:397a8728-e2ae-4db0-8446-9044007df4e1 nodeName:}" failed. No retries permitted until 2025-11-28 10:13:56.876230183 +0000 UTC m=+1008.575204363 (durationBeforeRetry 8s).
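[annotation] Every one of these MountVolume.SetUp failures names a TLS secret (webhook-server-cert, metrics-server-cert, infra-operator-webhook-server-cert, openstack-baremetal-operator-webhook-server-cert) that has simply not been issued yet; once the secrets appear, the mounts succeed a few seconds further down in this log. A hypothetical client-go probe that waits for those secrets to exist, using the namespace and names from the entries above (a diagnostic sketch, not part of the operators themselves):

package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load the local kubeconfig (~/.kube/config) and build a clientset.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Secret names copied from the failing mounts in this log.
	names := []string{
		"webhook-server-cert",
		"metrics-server-cert",
		"infra-operator-webhook-server-cert",
		"openstack-baremetal-operator-webhook-server-cert",
	}
	for _, name := range names {
		for {
			_, err := client.CoreV1().Secrets("openstack-operators").Get(context.TODO(), name, metav1.GetOptions{})
			if err == nil {
				fmt.Printf("secret %s: present\n", name)
				break
			}
			fmt.Printf("secret %s: %v; retrying in 2s\n", name, err)
			time.Sleep(2 * time.Second)
		}
	}
}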
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-metrics-certs") pod "openstack-operator-controller-manager-8599fd59b5-m2r97" (UID: "397a8728-e2ae-4db0-8446-9044007df4e1") : secret "metrics-server-cert" not found
Nov 28 10:13:54 crc kubenswrapper[4838]: E1128 10:13:54.723944 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rcqcv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-5b77f656f-mfvxw_openstack-operators(bfc8796f-9498-4707-ae79-225de0c3d39f): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 10:13:54 crc kubenswrapper[4838]: E1128 10:13:54.725997 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-mfvxw" podUID="bfc8796f-9498-4707-ae79-225de0c3d39f"
Nov 28 10:13:54 crc kubenswrapper[4838]: E1128 10:13:54.741932 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zhssf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-d77b94747-2mxtr_openstack-operators(fb9e8fa1-8798-424a-a435-daae465a8e79): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 10:13:54 crc kubenswrapper[4838]: E1128 10:13:54.741976 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-srwxm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-66f4dd4bc7-lppm4_openstack-operators(aef566ed-5e8f-4ce9-9fa4-75bfef26a65e): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 10:13:54 crc kubenswrapper[4838]: E1128 10:13:54.745172 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2mxtr" podUID="fb9e8fa1-8798-424a-a435-daae465a8e79"
Nov 28 10:13:54 crc kubenswrapper[4838]: E1128 10:13:54.745194 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lppm4" podUID="aef566ed-5e8f-4ce9-9fa4-75bfef26a65e"
Nov 28 10:13:54 crc kubenswrapper[4838]: E1128 10:13:54.754620 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5646f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-6fdcddb789-nw7wx_openstack-operators(03bb7fb2-31ae-4e18-b77d-e6dad8007460): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 10:13:54 crc kubenswrapper[4838]: E1128 10:13:54.756480 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-nw7wx" podUID="03bb7fb2-31ae-4e18-b77d-e6dad8007460"
Nov 28 10:13:55 crc kubenswrapper[4838]: I1128 10:13:55.159158 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jrtsw" event={"ID":"7b19f93b-ae7d-4e10-acce-53f0c65bbce0","Type":"ContainerStarted","Data":"9553787df9877dcd5be6436a7a676cfbff5a423caf7937810af67c2c7fd585af"}
Nov 28 10:13:55 crc kubenswrapper[4838]: I1128 10:13:55.171138 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-zsv5q" event={"ID":"2d42b4ea-468b-482a-8d06-57d2cd7d40f0","Type":"ContainerStarted","Data":"8be85f22c8236a186a8aae7e234be566e4a2b2e1069628374391acda5eb84532"}
Nov 28 10:13:55 crc kubenswrapper[4838]: I1128 10:13:55.184155 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-99pnl" event={"ID":"f79a2f03-43b3-47d0-89f8-55374a730a22","Type":"ContainerStarted","Data":"d6ecccbac1dffc8080653cf9046041dc62757fa1be59713185c7f6a7ef9cd783"}
Nov 28 10:13:55 crc kubenswrapper[4838]: I1128 10:13:55.188300 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dldwc" event={"ID":"00ae7528-ac6c-4ceb-9e8f-80e588aced3d","Type":"ContainerStarted","Data":"5a1311b8963141d064f4d4593f32ebe76e115a006f3a81079d883d0a5697a864"}
Nov 28 10:13:55 crc kubenswrapper[4838]: I1128 10:13:55.202966 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-67764766d7-5dcgh" event={"ID":"d8596d98-979f-4b13-bef4-ccaabbcf155e","Type":"ContainerStarted","Data":"8c166b73678fcf6540a6acee382eede6cb4addd43a4f4f703137ab6b545ed3ac"}
Nov 28 10:13:55 crc kubenswrapper[4838]: I1128 10:13:55.214586 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-sgt4v" event={"ID":"20706334-3560-47c0-beee-0eacda6e2eeb","Type":"ContainerStarted","Data":"dcc38a6a9f361eed710b7ee79d4c86a6c2c6c44ba6002d7c5d315675180c6381"}
Nov 28 10:13:55 crc kubenswrapper[4838]: I1128 10:13:55.230060 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-tjbnw" event={"ID":"40466f6c-03c1-4aa4-9d03-947168f4068c","Type":"ContainerStarted","Data":"5ee3e3927f47ba4c5c26d4effdf71060a181e1ed74ef9726c19db31f55643631"}
Nov 28 10:13:55 crc kubenswrapper[4838]: I1128 10:13:55.253469 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-jtm69" event={"ID":"aceca209-5955-4644-a139-2dfc5d36bb48","Type":"ContainerStarted","Data":"ea4d6d4c56b5ca11e56c472d76d37070cdff18cced850f16a8b9aa03807e5aea"}
Nov 28 10:13:55 crc kubenswrapper[4838]: I1128 10:13:55.255028 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-mfvxw" event={"ID":"bfc8796f-9498-4707-ae79-225de0c3d39f","Type":"ContainerStarted","Data":"f961f536019bbc6514d8734a3ea6ab16d8148c7edbf673d615424a9ad7512152"}
Nov 28 10:13:55 crc kubenswrapper[4838]: I1128 10:13:55.255966 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-mfvxw"
Nov 28 10:13:55 crc kubenswrapper[4838]: E1128 10:13:55.258174 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-mfvxw" podUID="bfc8796f-9498-4707-ae79-225de0c3d39f"
Nov 28 10:13:55 crc kubenswrapper[4838]: I1128 10:13:55.258320 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lppm4" event={"ID":"aef566ed-5e8f-4ce9-9fa4-75bfef26a65e","Type":"ContainerStarted","Data":"d41cc360057daf9bcaeffc3c094051eb4d20f77bc34d8a05aaafa6a48f694e8a"}
Nov 28 10:13:55 crc kubenswrapper[4838]: I1128 10:13:55.258979 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lppm4"
Nov 28 10:13:55 crc kubenswrapper[4838]: E1128 10:13:55.277030 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lppm4" podUID="aef566ed-5e8f-4ce9-9fa4-75bfef26a65e"
Nov 28 10:13:55 crc kubenswrapper[4838]: I1128 10:13:55.303457 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2mxtr" event={"ID":"fb9e8fa1-8798-424a-a435-daae465a8e79","Type":"ContainerStarted","Data":"dd4ec97e1ee34f0873c0945aab2e0e2fcb5f7c6ff68123b979664f827d6dc639"}
Nov 28 10:13:55 crc kubenswrapper[4838]: I1128 10:13:55.304097 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2mxtr"
Nov 28 10:13:55 crc kubenswrapper[4838]: E1128 10:13:55.309861 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2mxtr" podUID="fb9e8fa1-8798-424a-a435-daae465a8e79"
Nov 28 10:13:55 crc kubenswrapper[4838]: I1128 10:13:55.342618 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-qt6hp" event={"ID":"f9825746-5143-4716-9458-aad44231b721","Type":"ContainerStarted","Data":"1d6fd3e5b1f7bb9f4611fa8352511d0b0c722956e3d60d020bf9b0df2b97d0d5"}
Nov 28 10:13:55 crc kubenswrapper[4838]: I1128 10:13:55.344382 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-nw7wx" event={"ID":"03bb7fb2-31ae-4e18-b77d-e6dad8007460","Type":"ContainerStarted","Data":"c3a3f4bc0510f598e27e1879dde93e567067d2105914f502f157ab165a2247d7"}
Nov 28 10:13:55 crc kubenswrapper[4838]: I1128 10:13:55.345068 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-nw7wx"
Nov 28 10:13:55 crc kubenswrapper[4838]: E1128 10:13:55.350997 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-nw7wx" podUID="03bb7fb2-31ae-4e18-b77d-e6dad8007460"
Nov 28 10:13:55 crc kubenswrapper[4838]: I1128 10:13:55.377635 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jk9v2" event={"ID":"eb245c1e-92f6-486e-be63-0093a22ed7b0","Type":"ContainerStarted","Data":"92be1d00b00a05ec6c7d7dd59684f47c0d513b2ae9f7df46589c87ed115ba63c"}
Nov 28 10:13:56 crc kubenswrapper[4838]: I1128 10:13:56.180222 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/64a7b90b-6294-429b-b7f8-7820d9a5514e-cert\") pod \"infra-operator-controller-manager-57548d458d-7qwxs\" (UID: \"64a7b90b-6294-429b-b7f8-7820d9a5514e\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-7qwxs"
Nov 28 10:13:56 crc kubenswrapper[4838]: I1128 10:13:56.209882 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/64a7b90b-6294-429b-b7f8-7820d9a5514e-cert\") pod \"infra-operator-controller-manager-57548d458d-7qwxs\" (UID: \"64a7b90b-6294-429b-b7f8-7820d9a5514e\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-7qwxs"
Nov 28 10:13:56 crc kubenswrapper[4838]: I1128 10:13:56.288640 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-7qwxs"
Nov 28 10:13:56 crc kubenswrapper[4838]: E1128 10:13:56.391536 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-mfvxw" podUID="bfc8796f-9498-4707-ae79-225de0c3d39f"
Nov 28 10:13:56 crc kubenswrapper[4838]: E1128 10:13:56.391833 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-nw7wx" podUID="03bb7fb2-31ae-4e18-b77d-e6dad8007460"
Nov 28 10:13:56 crc kubenswrapper[4838]: E1128 10:13:56.392567 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2mxtr" podUID="fb9e8fa1-8798-424a-a435-daae465a8e79"
Nov 28 10:13:56 crc kubenswrapper[4838]: E1128 10:13:56.392643 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lppm4" podUID="aef566ed-5e8f-4ce9-9fa4-75bfef26a65e"
Nov 28 10:13:56 crc kubenswrapper[4838]: I1128 10:13:56.487495 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dd6ab766-6c66-4d3e-8089-9fe2faf6a28a-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6\" (UID: \"dd6ab766-6c66-4d3e-8089-9fe2faf6a28a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6"
Nov 28 10:13:56 crc kubenswrapper[4838]: I1128 10:13:56.492326 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dd6ab766-6c66-4d3e-8089-9fe2faf6a28a-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6\" (UID: \"dd6ab766-6c66-4d3e-8089-9fe2faf6a28a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6"
Nov 28 10:13:56 crc kubenswrapper[4838]: I1128 10:13:56.577729 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6"
Nov 28 10:13:56 crc kubenswrapper[4838]: I1128 10:13:56.894578 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-metrics-certs\") pod \"openstack-operator-controller-manager-8599fd59b5-m2r97\" (UID: \"397a8728-e2ae-4db0-8446-9044007df4e1\") " pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97"
Nov 28 10:13:56 crc kubenswrapper[4838]: I1128 10:13:56.894631 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-webhook-certs\") pod \"openstack-operator-controller-manager-8599fd59b5-m2r97\" (UID: \"397a8728-e2ae-4db0-8446-9044007df4e1\") " pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97"
Nov 28 10:13:56 crc kubenswrapper[4838]: I1128 10:13:56.898487 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-metrics-certs\") pod \"openstack-operator-controller-manager-8599fd59b5-m2r97\" (UID: \"397a8728-e2ae-4db0-8446-9044007df4e1\") " pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97"
Nov 28 10:13:56 crc kubenswrapper[4838]: I1128 10:13:56.911867 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/397a8728-e2ae-4db0-8446-9044007df4e1-webhook-certs\") pod \"openstack-operator-controller-manager-8599fd59b5-m2r97\" (UID: \"397a8728-e2ae-4db0-8446-9044007df4e1\") " pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97"
Nov 28 10:13:57 crc kubenswrapper[4838]: I1128 10:13:57.013991 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97"
Nov 28 10:13:57 crc kubenswrapper[4838]: I1128 10:13:57.460429 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-7qwxs"]
Nov 28 10:13:57 crc kubenswrapper[4838]: W1128 10:13:57.482146 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod64a7b90b_6294_429b_b7f8_7820d9a5514e.slice/crio-2918ae7a32c106f1ca3292891208ba73413cf0410a1e0bfdc8d97f3f020355a7 WatchSource:0}: Error finding container 2918ae7a32c106f1ca3292891208ba73413cf0410a1e0bfdc8d97f3f020355a7: Status 404 returned error can't find the container with id 2918ae7a32c106f1ca3292891208ba73413cf0410a1e0bfdc8d97f3f020355a7
Nov 28 10:13:57 crc kubenswrapper[4838]: I1128 10:13:57.533321 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6"]
Nov 28 10:13:57 crc kubenswrapper[4838]: W1128 10:13:57.597661 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddd6ab766_6c66_4d3e_8089_9fe2faf6a28a.slice/crio-a1cc727a45338985113d95b532a84b655d6406b3e563b40ad1246b6728ff88f4 WatchSource:0}: Error finding container a1cc727a45338985113d95b532a84b655d6406b3e563b40ad1246b6728ff88f4: Status 404 returned error can't find the container with id a1cc727a45338985113d95b532a84b655d6406b3e563b40ad1246b6728ff88f4
Nov 28 10:13:58 crc kubenswrapper[4838]: I1128 10:13:58.415888 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-7qwxs" event={"ID":"64a7b90b-6294-429b-b7f8-7820d9a5514e","Type":"ContainerStarted","Data":"2918ae7a32c106f1ca3292891208ba73413cf0410a1e0bfdc8d97f3f020355a7"}
Nov 28 10:13:58 crc kubenswrapper[4838]: I1128 10:13:58.418293 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6" event={"ID":"dd6ab766-6c66-4d3e-8089-9fe2faf6a28a","Type":"ContainerStarted","Data":"a1cc727a45338985113d95b532a84b655d6406b3e563b40ad1246b6728ff88f4"}
Nov 28 10:13:58 crc kubenswrapper[4838]: I1128 10:13:58.943523 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97"]
Nov 28 10:14:00 crc kubenswrapper[4838]: W1128 10:14:00.299457 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod397a8728_e2ae_4db0_8446_9044007df4e1.slice/crio-f4bb0d1010f2eff88c02ce10747cf998dcd1b7ff8bc343b44c5475b5058382e4 WatchSource:0}: Error finding container f4bb0d1010f2eff88c02ce10747cf998dcd1b7ff8bc343b44c5475b5058382e4: Status 404 returned error can't find the container with id f4bb0d1010f2eff88c02ce10747cf998dcd1b7ff8bc343b44c5475b5058382e4
Nov 28 10:14:00 crc kubenswrapper[4838]: I1128 10:14:00.430962 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97" event={"ID":"397a8728-e2ae-4db0-8446-9044007df4e1","Type":"ContainerStarted","Data":"f4bb0d1010f2eff88c02ce10747cf998dcd1b7ff8bc343b44c5475b5058382e4"}
Nov 28 10:14:00 crc kubenswrapper[4838]: I1128 10:14:00.630160 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready"
pod="openstack-operators/heat-operator-controller-manager-5b77f656f-mfvxw" Nov 28 10:14:00 crc kubenswrapper[4838]: I1128 10:14:00.800776 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lppm4" Nov 28 10:14:00 crc kubenswrapper[4838]: I1128 10:14:00.906039 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-nw7wx" Nov 28 10:14:01 crc kubenswrapper[4838]: I1128 10:14:01.038686 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2mxtr" Nov 28 10:14:07 crc kubenswrapper[4838]: I1128 10:14:07.483454 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97" event={"ID":"397a8728-e2ae-4db0-8446-9044007df4e1","Type":"ContainerStarted","Data":"e074b96bea4ad74354d687d8af2625fc2486f718417ae13420f91e005793ec67"} Nov 28 10:14:07 crc kubenswrapper[4838]: I1128 10:14:07.484852 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97" Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.492728 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-jtm69" event={"ID":"aceca209-5955-4644-a139-2dfc5d36bb48","Type":"ContainerStarted","Data":"899ecb4252247f950f20ad1de3358076ef6642de95fcad75916ec0059cd96e2d"} Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.493532 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-jtm69" Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.494299 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xj7cz" event={"ID":"6979c02b-5bc1-4eec-aa05-086f449ffd93","Type":"ContainerStarted","Data":"d4e563ffa5a3a8089d60d2208782f382d72ce4fadb9f08c38c61bcc4c0a69439"} Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.495339 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-955677c94-jtm69" Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.496557 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-2xswp" event={"ID":"5f3be357-f971-4dd1-bb7e-82098aaad7b4","Type":"ContainerStarted","Data":"4e993a4c9728ba129a9973cf823bc7b7ed630c7ebe8aad4c846083b47e9d75ce"} Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.498748 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-99pnl" event={"ID":"f79a2f03-43b3-47d0-89f8-55374a730a22","Type":"ContainerStarted","Data":"5f1f4439383be60402877482001117bfe029b99463e2e8883a6cc9308233ed0f"} Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.498982 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-99pnl" Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.500359 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jk9v2" 
event={"ID":"eb245c1e-92f6-486e-be63-0093a22ed7b0","Type":"ContainerStarted","Data":"3f036840dce8301091ce3f44ac9358092d4d16edf40777c51a12cfbe936481ef"} Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.501277 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jk9v2" Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.501500 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-99pnl" Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.502364 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jk9v2" Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.502848 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-z8m7s" event={"ID":"458f4354-42e8-46d1-a571-0a0d1a852574","Type":"ContainerStarted","Data":"73ca185fd3cb4444abb443cf6cf786aa0cca2a4256b65fd08d16d81d1d23110b"} Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.503902 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-7qwxs" event={"ID":"64a7b90b-6294-429b-b7f8-7820d9a5514e","Type":"ContainerStarted","Data":"74595eefac4f9f016c8984a6067f72152bdf4fc0e67259a46c9f8cce3f587c3f"} Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.504987 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6" event={"ID":"dd6ab766-6c66-4d3e-8089-9fe2faf6a28a","Type":"ContainerStarted","Data":"bc351bb3dc1832a7bbb5040bb45eccf1a64265fc86f674c20d5ef6503b7ecc75"} Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.506310 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lppm4" event={"ID":"aef566ed-5e8f-4ce9-9fa4-75bfef26a65e","Type":"ContainerStarted","Data":"95e16975b410aebdd2c77656aeb5ce4d2f52e57148e1de7a04816f23cb25d053"} Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.507346 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-t8tj5" event={"ID":"f74a2c89-ae8d-428e-b8b2-d2d58e943f8e","Type":"ContainerStarted","Data":"ca2fdd5c3891d9fc7c4130f4fc175a0bbc4bde60c255d3eabde25278cfa9becb"} Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.508189 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-zrl6r" event={"ID":"90e6e6d2-fd36-40e9-9002-d3a5e4c53f4e","Type":"ContainerStarted","Data":"9a180f4a6e0678a9707bd12ebaeb5081b6f39ddc4150bca9cad2ded744866586"} Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.511145 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jrtsw" event={"ID":"7b19f93b-ae7d-4e10-acce-53f0c65bbce0","Type":"ContainerStarted","Data":"c81af396959d532ce03140a721658912147bf476009a3cea9cfe52e0275f53e3"} Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.511778 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jrtsw" Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.512629 4838 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97" podStartSLOduration=28.512608683 podStartE2EDuration="28.512608683s" podCreationTimestamp="2025-11-28 10:13:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:14:07.519097223 +0000 UTC m=+1019.218071403" watchObservedRunningTime="2025-11-28 10:14:08.512608683 +0000 UTC m=+1020.211582873" Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.514068 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jrtsw" Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.518739 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-955677c94-jtm69" podStartSLOduration=2.625555931 podStartE2EDuration="28.518704077s" podCreationTimestamp="2025-11-28 10:13:40 +0000 UTC" firstStartedPulling="2025-11-28 10:13:41.126225193 +0000 UTC m=+992.825199363" lastFinishedPulling="2025-11-28 10:14:07.019373349 +0000 UTC m=+1018.718347509" observedRunningTime="2025-11-28 10:14:08.509103119 +0000 UTC m=+1020.208077309" watchObservedRunningTime="2025-11-28 10:14:08.518704077 +0000 UTC m=+1020.217678247" Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.523037 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lppm4" podStartSLOduration=15.962513888 podStartE2EDuration="28.523014264s" podCreationTimestamp="2025-11-28 10:13:40 +0000 UTC" firstStartedPulling="2025-11-28 10:13:41.666939202 +0000 UTC m=+993.365913372" lastFinishedPulling="2025-11-28 10:13:54.227439578 +0000 UTC m=+1005.926413748" observedRunningTime="2025-11-28 10:14:08.521810321 +0000 UTC m=+1020.220784541" watchObservedRunningTime="2025-11-28 10:14:08.523014264 +0000 UTC m=+1020.221988464" Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.613037 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-99pnl" podStartSLOduration=3.003208248 podStartE2EDuration="28.613018439s" podCreationTimestamp="2025-11-28 10:13:40 +0000 UTC" firstStartedPulling="2025-11-28 10:13:41.409423144 +0000 UTC m=+993.108397314" lastFinishedPulling="2025-11-28 10:14:07.019233315 +0000 UTC m=+1018.718207505" observedRunningTime="2025-11-28 10:14:08.587025359 +0000 UTC m=+1020.285999529" watchObservedRunningTime="2025-11-28 10:14:08.613018439 +0000 UTC m=+1020.311992609" Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.645326 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jk9v2" podStartSLOduration=9.574440445 podStartE2EDuration="28.645306999s" podCreationTimestamp="2025-11-28 10:13:40 +0000 UTC" firstStartedPulling="2025-11-28 10:13:41.824797235 +0000 UTC m=+993.523771405" lastFinishedPulling="2025-11-28 10:14:00.895663779 +0000 UTC m=+1012.594637959" observedRunningTime="2025-11-28 10:14:08.611057445 +0000 UTC m=+1020.310031605" watchObservedRunningTime="2025-11-28 10:14:08.645306999 +0000 UTC m=+1020.344281169" Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.665999 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xj7cz" podStartSLOduration=8.378577958 podStartE2EDuration="27.665977776s" podCreationTimestamp="2025-11-28 10:13:41 +0000 UTC" firstStartedPulling="2025-11-28 10:13:42.079041856 +0000 UTC m=+993.778016026" lastFinishedPulling="2025-11-28 10:14:01.366441664 +0000 UTC m=+1013.065415844" observedRunningTime="2025-11-28 10:14:08.627285703 +0000 UTC m=+1020.326259873" watchObservedRunningTime="2025-11-28 10:14:08.665977776 +0000 UTC m=+1020.364951946" Nov 28 10:14:08 crc kubenswrapper[4838]: I1128 10:14:08.671567 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jrtsw" podStartSLOduration=3.591437687 podStartE2EDuration="28.671547906s" podCreationTimestamp="2025-11-28 10:13:40 +0000 UTC" firstStartedPulling="2025-11-28 10:13:41.940708539 +0000 UTC m=+993.639682709" lastFinishedPulling="2025-11-28 10:14:07.020818758 +0000 UTC m=+1018.719792928" observedRunningTime="2025-11-28 10:14:08.653366615 +0000 UTC m=+1020.352340785" watchObservedRunningTime="2025-11-28 10:14:08.671547906 +0000 UTC m=+1020.370522076" Nov 28 10:14:15 crc kubenswrapper[4838]: I1128 10:14:15.582273 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-nw7wx" event={"ID":"03bb7fb2-31ae-4e18-b77d-e6dad8007460","Type":"ContainerStarted","Data":"d14e9a78eab2729cff8a632747607ab3a6ea561d9e5d17a3cba6578943d077eb"} Nov 28 10:14:15 crc kubenswrapper[4838]: I1128 10:14:15.584539 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-sgt4v" event={"ID":"20706334-3560-47c0-beee-0eacda6e2eeb","Type":"ContainerStarted","Data":"010b12094210fcbef60fcaf7a8f1bba0be775cc6df7ced348ffd072b1ec5d837"} Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.592439 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-tjbnw" event={"ID":"40466f6c-03c1-4aa4-9d03-947168f4068c","Type":"ContainerStarted","Data":"78d815818a463526340f645ce044901a6ff3c2c334674763aea2239eeef845ca"} Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.593822 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-mfvxw" event={"ID":"bfc8796f-9498-4707-ae79-225de0c3d39f","Type":"ContainerStarted","Data":"53bc500723ecc332be3bf4edf372e0983fa86db87063aef228bf623089984c08"} Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.595347 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2mxtr" event={"ID":"fb9e8fa1-8798-424a-a435-daae465a8e79","Type":"ContainerStarted","Data":"fde078bbac8fddfb46c5f4e17f68138720fb1936ce72bb3f2e1117e328eae530"} Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.596978 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dldwc" event={"ID":"00ae7528-ac6c-4ceb-9e8f-80e588aced3d","Type":"ContainerStarted","Data":"35de5537fb78d92af4853f13c267f68b4e5cc213312c0c4b2c992b50bf6c7d44"} Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.598280 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-pqwfn" 
event={"ID":"431c4d08-781a-4925-96cd-153997f72239","Type":"ContainerStarted","Data":"b0f789df6d3848903def30869ed736a25967cca26b7275fe4bb6ccb9d6b46f23"} Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.599731 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-t8tj5" event={"ID":"f74a2c89-ae8d-428e-b8b2-d2d58e943f8e","Type":"ContainerStarted","Data":"24a87a102b38f755074681bf86213330d64c294836b97a7b594a4ce0146e12f4"} Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.601142 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-zrl6r" event={"ID":"90e6e6d2-fd36-40e9-9002-d3a5e4c53f4e","Type":"ContainerStarted","Data":"1d8ecc8aaac6686f84788e58d4fac5d2054f95e4b9fdea750706dbe6d4e7511a"} Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.602661 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-qt6hp" event={"ID":"f9825746-5143-4716-9458-aad44231b721","Type":"ContainerStarted","Data":"852820457b4471c4ed88b6ba46da4ff6718835708cedb634d33c365c43477dc6"} Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.603598 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-qt6hp" Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.605107 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6" event={"ID":"dd6ab766-6c66-4d3e-8089-9fe2faf6a28a","Type":"ContainerStarted","Data":"d12a690843266d6d99e92c28ec7a7df0562b55aa80bb78b1148081df518ddc30"} Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.605401 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-qt6hp" Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.607235 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-z8m7s" event={"ID":"458f4354-42e8-46d1-a571-0a0d1a852574","Type":"ContainerStarted","Data":"f60150896e85d7e237e46562536b3f96a73211c221c2b4e02884b6b407bb0751"} Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.608850 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-67764766d7-5dcgh" event={"ID":"d8596d98-979f-4b13-bef4-ccaabbcf155e","Type":"ContainerStarted","Data":"e5a9461270f6a06d58d46107bdf01dab880bde472b95186a1b684a2296e3e1a0"} Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.610843 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-zsv5q" event={"ID":"2d42b4ea-468b-482a-8d06-57d2cd7d40f0","Type":"ContainerStarted","Data":"5f508d2fad0c981abfdae45037bcc31647ba12018e954140399116cefc276ec0"} Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.611367 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-zsv5q" Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.612636 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-2xswp" 
event={"ID":"5f3be357-f971-4dd1-bb7e-82098aaad7b4","Type":"ContainerStarted","Data":"7d7dbe911ac41567fb65fd29a5868dc6b3d317f8850dfcefa778e951e3dbc930"} Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.615960 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2mxtr" podStartSLOduration=24.305847464 podStartE2EDuration="36.615944453s" podCreationTimestamp="2025-11-28 10:13:40 +0000 UTC" firstStartedPulling="2025-11-28 10:13:41.942169718 +0000 UTC m=+993.641143898" lastFinishedPulling="2025-11-28 10:13:54.252266717 +0000 UTC m=+1005.951240887" observedRunningTime="2025-11-28 10:14:16.612829219 +0000 UTC m=+1028.311803389" watchObservedRunningTime="2025-11-28 10:14:16.615944453 +0000 UTC m=+1028.314918623" Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.616601 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-7qwxs" event={"ID":"64a7b90b-6294-429b-b7f8-7820d9a5514e","Type":"ContainerStarted","Data":"b1d947dad4112bbeeda9b063b717ee6f3b16608f8cae66aa8e08747db5cfc16b"} Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.617474 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-sgt4v" Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.618428 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-sgt4v" Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.619190 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-zsv5q" Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.632947 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-qt6hp" podStartSLOduration=11.238229005 podStartE2EDuration="36.63293284s" podCreationTimestamp="2025-11-28 10:13:40 +0000 UTC" firstStartedPulling="2025-11-28 10:13:41.625485505 +0000 UTC m=+993.324459675" lastFinishedPulling="2025-11-28 10:14:07.02018934 +0000 UTC m=+1018.719163510" observedRunningTime="2025-11-28 10:14:16.62736774 +0000 UTC m=+1028.326341910" watchObservedRunningTime="2025-11-28 10:14:16.63293284 +0000 UTC m=+1028.331907010" Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.671619 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-zsv5q" podStartSLOduration=10.868967737 podStartE2EDuration="36.671601553s" podCreationTimestamp="2025-11-28 10:13:40 +0000 UTC" firstStartedPulling="2025-11-28 10:13:41.222857567 +0000 UTC m=+992.921831737" lastFinishedPulling="2025-11-28 10:14:07.025491383 +0000 UTC m=+1018.724465553" observedRunningTime="2025-11-28 10:14:16.661919921 +0000 UTC m=+1028.360894091" watchObservedRunningTime="2025-11-28 10:14:16.671601553 +0000 UTC m=+1028.370575723" Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.685633 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-sgt4v" podStartSLOduration=17.278793035 podStartE2EDuration="36.68562302s" podCreationTimestamp="2025-11-28 10:13:40 +0000 UTC" firstStartedPulling="2025-11-28 10:13:41.489143192 +0000 UTC m=+993.188117352" 
lastFinishedPulling="2025-11-28 10:14:00.895973137 +0000 UTC m=+1012.594947337" observedRunningTime="2025-11-28 10:14:16.682123605 +0000 UTC m=+1028.381097775" watchObservedRunningTime="2025-11-28 10:14:16.68562302 +0000 UTC m=+1028.384597190" Nov 28 10:14:16 crc kubenswrapper[4838]: I1128 10:14:16.741084 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-nw7wx" podStartSLOduration=24.337214509 podStartE2EDuration="36.741065473s" podCreationTimestamp="2025-11-28 10:13:40 +0000 UTC" firstStartedPulling="2025-11-28 10:13:41.805460675 +0000 UTC m=+993.504434845" lastFinishedPulling="2025-11-28 10:13:54.209311639 +0000 UTC m=+1005.908285809" observedRunningTime="2025-11-28 10:14:16.738218367 +0000 UTC m=+1028.437192547" watchObservedRunningTime="2025-11-28 10:14:16.741065473 +0000 UTC m=+1028.440039643" Nov 28 10:14:17 crc kubenswrapper[4838]: I1128 10:14:17.021662 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-8599fd59b5-m2r97" Nov 28 10:14:17 crc kubenswrapper[4838]: I1128 10:14:17.626324 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-zrl6r" Nov 28 10:14:17 crc kubenswrapper[4838]: I1128 10:14:17.630712 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-zrl6r" Nov 28 10:14:17 crc kubenswrapper[4838]: I1128 10:14:17.647002 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-zrl6r" podStartSLOduration=12.500920758 podStartE2EDuration="37.646986484s" podCreationTimestamp="2025-11-28 10:13:40 +0000 UTC" firstStartedPulling="2025-11-28 10:13:41.963431741 +0000 UTC m=+993.662405911" lastFinishedPulling="2025-11-28 10:14:07.109497467 +0000 UTC m=+1018.808471637" observedRunningTime="2025-11-28 10:14:17.641616579 +0000 UTC m=+1029.340590749" watchObservedRunningTime="2025-11-28 10:14:17.646986484 +0000 UTC m=+1029.345960654" Nov 28 10:14:17 crc kubenswrapper[4838]: I1128 10:14:17.668750 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-67764766d7-5dcgh" podStartSLOduration=12.46942629 podStartE2EDuration="37.668706649s" podCreationTimestamp="2025-11-28 10:13:40 +0000 UTC" firstStartedPulling="2025-11-28 10:13:41.826737868 +0000 UTC m=+993.525712038" lastFinishedPulling="2025-11-28 10:14:07.026018217 +0000 UTC m=+1018.724992397" observedRunningTime="2025-11-28 10:14:17.660661372 +0000 UTC m=+1029.359635582" watchObservedRunningTime="2025-11-28 10:14:17.668706649 +0000 UTC m=+1029.367680829" Nov 28 10:14:17 crc kubenswrapper[4838]: I1128 10:14:17.705699 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-z8m7s" podStartSLOduration=12.571266563 podStartE2EDuration="37.705675195s" podCreationTimestamp="2025-11-28 10:13:40 +0000 UTC" firstStartedPulling="2025-11-28 10:13:41.977374587 +0000 UTC m=+993.676348747" lastFinishedPulling="2025-11-28 10:14:07.111783209 +0000 UTC m=+1018.810757379" observedRunningTime="2025-11-28 10:14:17.704139523 +0000 UTC m=+1029.403113713" watchObservedRunningTime="2025-11-28 10:14:17.705675195 +0000 UTC m=+1029.404649385" Nov 28 10:14:17 crc 
kubenswrapper[4838]: I1128 10:14:17.711663 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-tjbnw" podStartSLOduration=12.604124138 podStartE2EDuration="37.711637925s" podCreationTimestamp="2025-11-28 10:13:40 +0000 UTC" firstStartedPulling="2025-11-28 10:13:41.913463565 +0000 UTC m=+993.612437725" lastFinishedPulling="2025-11-28 10:14:07.020977332 +0000 UTC m=+1018.719951512" observedRunningTime="2025-11-28 10:14:17.68841534 +0000 UTC m=+1029.387389510" watchObservedRunningTime="2025-11-28 10:14:17.711637925 +0000 UTC m=+1029.410612115" Nov 28 10:14:17 crc kubenswrapper[4838]: I1128 10:14:17.748972 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-2xswp" podStartSLOduration=12.701379359 podStartE2EDuration="37.748953411s" podCreationTimestamp="2025-11-28 10:13:40 +0000 UTC" firstStartedPulling="2025-11-28 10:13:42.06729095 +0000 UTC m=+993.766265120" lastFinishedPulling="2025-11-28 10:14:07.114864972 +0000 UTC m=+1018.813839172" observedRunningTime="2025-11-28 10:14:17.736595118 +0000 UTC m=+1029.435569288" watchObservedRunningTime="2025-11-28 10:14:17.748953411 +0000 UTC m=+1029.447927581" Nov 28 10:14:17 crc kubenswrapper[4838]: I1128 10:14:17.768616 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-57548d458d-7qwxs" podStartSLOduration=28.233628566 podStartE2EDuration="37.76858854s" podCreationTimestamp="2025-11-28 10:13:40 +0000 UTC" firstStartedPulling="2025-11-28 10:13:57.485216666 +0000 UTC m=+1009.184190836" lastFinishedPulling="2025-11-28 10:14:07.02017663 +0000 UTC m=+1018.719150810" observedRunningTime="2025-11-28 10:14:17.761404297 +0000 UTC m=+1029.460378467" watchObservedRunningTime="2025-11-28 10:14:17.76858854 +0000 UTC m=+1029.467562750" Nov 28 10:14:17 crc kubenswrapper[4838]: I1128 10:14:17.791582 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-t8tj5" podStartSLOduration=12.622592176 podStartE2EDuration="37.791539408s" podCreationTimestamp="2025-11-28 10:13:40 +0000 UTC" firstStartedPulling="2025-11-28 10:13:41.945906069 +0000 UTC m=+993.644880249" lastFinishedPulling="2025-11-28 10:14:07.114853311 +0000 UTC m=+1018.813827481" observedRunningTime="2025-11-28 10:14:17.787051237 +0000 UTC m=+1029.486025417" watchObservedRunningTime="2025-11-28 10:14:17.791539408 +0000 UTC m=+1029.490513588" Nov 28 10:14:17 crc kubenswrapper[4838]: I1128 10:14:17.809189 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-mfvxw" podStartSLOduration=25.247035775 podStartE2EDuration="37.809170634s" podCreationTimestamp="2025-11-28 10:13:40 +0000 UTC" firstStartedPulling="2025-11-28 10:13:41.679638975 +0000 UTC m=+993.378613145" lastFinishedPulling="2025-11-28 10:13:54.241773834 +0000 UTC m=+1005.940748004" observedRunningTime="2025-11-28 10:14:17.802028861 +0000 UTC m=+1029.501003031" watchObservedRunningTime="2025-11-28 10:14:17.809170634 +0000 UTC m=+1029.508144804" Nov 28 10:14:17 crc kubenswrapper[4838]: I1128 10:14:17.821634 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dldwc" podStartSLOduration=12.272375509 
podStartE2EDuration="37.821612888s" podCreationTimestamp="2025-11-28 10:13:40 +0000 UTC" firstStartedPulling="2025-11-28 10:13:41.476773848 +0000 UTC m=+993.175748018" lastFinishedPulling="2025-11-28 10:14:07.026011227 +0000 UTC m=+1018.724985397" observedRunningTime="2025-11-28 10:14:17.818082443 +0000 UTC m=+1029.517056613" watchObservedRunningTime="2025-11-28 10:14:17.821612888 +0000 UTC m=+1029.520587068" Nov 28 10:14:17 crc kubenswrapper[4838]: I1128 10:14:17.853797 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6" podStartSLOduration=28.343093876 podStartE2EDuration="37.853776616s" podCreationTimestamp="2025-11-28 10:13:40 +0000 UTC" firstStartedPulling="2025-11-28 10:13:57.599916717 +0000 UTC m=+1009.298890887" lastFinishedPulling="2025-11-28 10:14:07.110599457 +0000 UTC m=+1018.809573627" observedRunningTime="2025-11-28 10:14:17.843340484 +0000 UTC m=+1029.542314674" watchObservedRunningTime="2025-11-28 10:14:17.853776616 +0000 UTC m=+1029.552750796" Nov 28 10:14:18 crc kubenswrapper[4838]: I1128 10:14:18.635313 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-pqwfn" event={"ID":"431c4d08-781a-4925-96cd-153997f72239","Type":"ContainerStarted","Data":"56bd2878aeba3c1ab705786f536f17017817e0bb21e4f887c3d592aab176f429"} Nov 28 10:14:19 crc kubenswrapper[4838]: I1128 10:14:19.646265 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-pqwfn" Nov 28 10:14:19 crc kubenswrapper[4838]: I1128 10:14:19.668490 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-pqwfn" podStartSLOduration=14.576101311 podStartE2EDuration="39.66847066s" podCreationTimestamp="2025-11-28 10:13:40 +0000 UTC" firstStartedPulling="2025-11-28 10:13:42.082906681 +0000 UTC m=+993.781880851" lastFinishedPulling="2025-11-28 10:14:07.17527602 +0000 UTC m=+1018.874250200" observedRunningTime="2025-11-28 10:14:19.664083323 +0000 UTC m=+1031.363057523" watchObservedRunningTime="2025-11-28 10:14:19.66847066 +0000 UTC m=+1031.367444830" Nov 28 10:14:20 crc kubenswrapper[4838]: I1128 10:14:20.703610 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dldwc" Nov 28 10:14:20 crc kubenswrapper[4838]: I1128 10:14:20.706868 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dldwc" Nov 28 10:14:20 crc kubenswrapper[4838]: I1128 10:14:20.884196 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-67764766d7-5dcgh" Nov 28 10:14:20 crc kubenswrapper[4838]: I1128 10:14:20.886927 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-67764766d7-5dcgh" Nov 28 10:14:20 crc kubenswrapper[4838]: I1128 10:14:20.981842 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-tjbnw" Nov 28 10:14:20 crc kubenswrapper[4838]: I1128 10:14:20.983969 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-tjbnw" Nov 28 10:14:21 crc kubenswrapper[4838]: I1128 10:14:21.057861 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-t8tj5" Nov 28 10:14:21 crc kubenswrapper[4838]: I1128 10:14:21.060596 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-t8tj5" Nov 28 10:14:21 crc kubenswrapper[4838]: I1128 10:14:21.088422 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-2xswp" Nov 28 10:14:21 crc kubenswrapper[4838]: I1128 10:14:21.091283 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-2xswp" Nov 28 10:14:21 crc kubenswrapper[4838]: I1128 10:14:21.246229 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-pqwfn" Nov 28 10:14:21 crc kubenswrapper[4838]: I1128 10:14:21.573340 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-z8m7s" Nov 28 10:14:21 crc kubenswrapper[4838]: I1128 10:14:21.575672 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-z8m7s" Nov 28 10:14:23 crc kubenswrapper[4838]: I1128 10:14:23.939996 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:14:23 crc kubenswrapper[4838]: I1128 10:14:23.940636 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:14:26 crc kubenswrapper[4838]: I1128 10:14:26.289250 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-7qwxs" Nov 28 10:14:26 crc kubenswrapper[4838]: I1128 10:14:26.295562 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-7qwxs" Nov 28 10:14:26 crc kubenswrapper[4838]: I1128 10:14:26.578710 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6" Nov 28 10:14:26 crc kubenswrapper[4838]: I1128 10:14:26.584805 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.173319 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-qrzr7"] Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.177050 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-qrzr7" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.181942 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.181990 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-5gwgl" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.182265 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.182817 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.188525 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-qrzr7"] Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.230461 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkmrt\" (UniqueName: \"kubernetes.io/projected/5d83d658-68c9-4bad-a05b-221d397b88ec-kube-api-access-qkmrt\") pod \"dnsmasq-dns-675f4bcbfc-qrzr7\" (UID: \"5d83d658-68c9-4bad-a05b-221d397b88ec\") " pod="openstack/dnsmasq-dns-675f4bcbfc-qrzr7" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.231984 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5d83d658-68c9-4bad-a05b-221d397b88ec-config\") pod \"dnsmasq-dns-675f4bcbfc-qrzr7\" (UID: \"5d83d658-68c9-4bad-a05b-221d397b88ec\") " pod="openstack/dnsmasq-dns-675f4bcbfc-qrzr7" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.253572 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-zqrtr"] Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.258975 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-zqrtr" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.262024 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.269313 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-zqrtr"] Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.333063 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d508c37-bed9-4481-b086-0159800059e5-config\") pod \"dnsmasq-dns-78dd6ddcc-zqrtr\" (UID: \"8d508c37-bed9-4481-b086-0159800059e5\") " pod="openstack/dnsmasq-dns-78dd6ddcc-zqrtr" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.333139 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkmrt\" (UniqueName: \"kubernetes.io/projected/5d83d658-68c9-4bad-a05b-221d397b88ec-kube-api-access-qkmrt\") pod \"dnsmasq-dns-675f4bcbfc-qrzr7\" (UID: \"5d83d658-68c9-4bad-a05b-221d397b88ec\") " pod="openstack/dnsmasq-dns-675f4bcbfc-qrzr7" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.333172 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5d83d658-68c9-4bad-a05b-221d397b88ec-config\") pod \"dnsmasq-dns-675f4bcbfc-qrzr7\" (UID: \"5d83d658-68c9-4bad-a05b-221d397b88ec\") " pod="openstack/dnsmasq-dns-675f4bcbfc-qrzr7" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.333200 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nt6mq\" (UniqueName: \"kubernetes.io/projected/8d508c37-bed9-4481-b086-0159800059e5-kube-api-access-nt6mq\") pod \"dnsmasq-dns-78dd6ddcc-zqrtr\" (UID: \"8d508c37-bed9-4481-b086-0159800059e5\") " pod="openstack/dnsmasq-dns-78dd6ddcc-zqrtr" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.333219 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d508c37-bed9-4481-b086-0159800059e5-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-zqrtr\" (UID: \"8d508c37-bed9-4481-b086-0159800059e5\") " pod="openstack/dnsmasq-dns-78dd6ddcc-zqrtr" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.334181 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5d83d658-68c9-4bad-a05b-221d397b88ec-config\") pod \"dnsmasq-dns-675f4bcbfc-qrzr7\" (UID: \"5d83d658-68c9-4bad-a05b-221d397b88ec\") " pod="openstack/dnsmasq-dns-675f4bcbfc-qrzr7" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.361127 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkmrt\" (UniqueName: \"kubernetes.io/projected/5d83d658-68c9-4bad-a05b-221d397b88ec-kube-api-access-qkmrt\") pod \"dnsmasq-dns-675f4bcbfc-qrzr7\" (UID: \"5d83d658-68c9-4bad-a05b-221d397b88ec\") " pod="openstack/dnsmasq-dns-675f4bcbfc-qrzr7" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.434444 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nt6mq\" (UniqueName: \"kubernetes.io/projected/8d508c37-bed9-4481-b086-0159800059e5-kube-api-access-nt6mq\") pod \"dnsmasq-dns-78dd6ddcc-zqrtr\" (UID: \"8d508c37-bed9-4481-b086-0159800059e5\") " pod="openstack/dnsmasq-dns-78dd6ddcc-zqrtr" Nov 28 10:14:43 crc 
kubenswrapper[4838]: I1128 10:14:43.434487 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d508c37-bed9-4481-b086-0159800059e5-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-zqrtr\" (UID: \"8d508c37-bed9-4481-b086-0159800059e5\") " pod="openstack/dnsmasq-dns-78dd6ddcc-zqrtr" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.434542 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d508c37-bed9-4481-b086-0159800059e5-config\") pod \"dnsmasq-dns-78dd6ddcc-zqrtr\" (UID: \"8d508c37-bed9-4481-b086-0159800059e5\") " pod="openstack/dnsmasq-dns-78dd6ddcc-zqrtr" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.435601 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d508c37-bed9-4481-b086-0159800059e5-config\") pod \"dnsmasq-dns-78dd6ddcc-zqrtr\" (UID: \"8d508c37-bed9-4481-b086-0159800059e5\") " pod="openstack/dnsmasq-dns-78dd6ddcc-zqrtr" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.436547 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d508c37-bed9-4481-b086-0159800059e5-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-zqrtr\" (UID: \"8d508c37-bed9-4481-b086-0159800059e5\") " pod="openstack/dnsmasq-dns-78dd6ddcc-zqrtr" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.455015 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nt6mq\" (UniqueName: \"kubernetes.io/projected/8d508c37-bed9-4481-b086-0159800059e5-kube-api-access-nt6mq\") pod \"dnsmasq-dns-78dd6ddcc-zqrtr\" (UID: \"8d508c37-bed9-4481-b086-0159800059e5\") " pod="openstack/dnsmasq-dns-78dd6ddcc-zqrtr" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.521173 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-qrzr7" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.582476 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-zqrtr" Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.970779 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-qrzr7"] Nov 28 10:14:43 crc kubenswrapper[4838]: W1128 10:14:43.975900 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5d83d658_68c9_4bad_a05b_221d397b88ec.slice/crio-7270f940d77814bf1e44d082d84685ea444deaa118d25d41ade9f6cc1501be91 WatchSource:0}: Error finding container 7270f940d77814bf1e44d082d84685ea444deaa118d25d41ade9f6cc1501be91: Status 404 returned error can't find the container with id 7270f940d77814bf1e44d082d84685ea444deaa118d25d41ade9f6cc1501be91 Nov 28 10:14:43 crc kubenswrapper[4838]: I1128 10:14:43.979272 4838 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 10:14:44 crc kubenswrapper[4838]: I1128 10:14:44.055099 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-zqrtr"] Nov 28 10:14:44 crc kubenswrapper[4838]: W1128 10:14:44.058506 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d508c37_bed9_4481_b086_0159800059e5.slice/crio-97226bc51f87332c5294fcfab4eaeb433c58351d1364989ee98d13a97018dd96 WatchSource:0}: Error finding container 97226bc51f87332c5294fcfab4eaeb433c58351d1364989ee98d13a97018dd96: Status 404 returned error can't find the container with id 97226bc51f87332c5294fcfab4eaeb433c58351d1364989ee98d13a97018dd96 Nov 28 10:14:44 crc kubenswrapper[4838]: I1128 10:14:44.876598 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-qrzr7" event={"ID":"5d83d658-68c9-4bad-a05b-221d397b88ec","Type":"ContainerStarted","Data":"7270f940d77814bf1e44d082d84685ea444deaa118d25d41ade9f6cc1501be91"} Nov 28 10:14:44 crc kubenswrapper[4838]: I1128 10:14:44.878163 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-zqrtr" event={"ID":"8d508c37-bed9-4481-b086-0159800059e5","Type":"ContainerStarted","Data":"97226bc51f87332c5294fcfab4eaeb433c58351d1364989ee98d13a97018dd96"} Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.172223 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-qrzr7"] Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.198923 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-sr898"] Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.200217 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.209790 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-sr898"] Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.281221 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f31fe79b-92dd-4237-b387-9d2c825fdacb-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-sr898\" (UID: \"f31fe79b-92dd-4237-b387-9d2c825fdacb\") " pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.281359 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cb8n5\" (UniqueName: \"kubernetes.io/projected/f31fe79b-92dd-4237-b387-9d2c825fdacb-kube-api-access-cb8n5\") pod \"dnsmasq-dns-5ccc8479f9-sr898\" (UID: \"f31fe79b-92dd-4237-b387-9d2c825fdacb\") " pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.281424 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f31fe79b-92dd-4237-b387-9d2c825fdacb-config\") pod \"dnsmasq-dns-5ccc8479f9-sr898\" (UID: \"f31fe79b-92dd-4237-b387-9d2c825fdacb\") " pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.384894 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cb8n5\" (UniqueName: \"kubernetes.io/projected/f31fe79b-92dd-4237-b387-9d2c825fdacb-kube-api-access-cb8n5\") pod \"dnsmasq-dns-5ccc8479f9-sr898\" (UID: \"f31fe79b-92dd-4237-b387-9d2c825fdacb\") " pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.384975 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f31fe79b-92dd-4237-b387-9d2c825fdacb-config\") pod \"dnsmasq-dns-5ccc8479f9-sr898\" (UID: \"f31fe79b-92dd-4237-b387-9d2c825fdacb\") " pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.385084 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f31fe79b-92dd-4237-b387-9d2c825fdacb-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-sr898\" (UID: \"f31fe79b-92dd-4237-b387-9d2c825fdacb\") " pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.386837 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f31fe79b-92dd-4237-b387-9d2c825fdacb-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-sr898\" (UID: \"f31fe79b-92dd-4237-b387-9d2c825fdacb\") " pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.388501 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f31fe79b-92dd-4237-b387-9d2c825fdacb-config\") pod \"dnsmasq-dns-5ccc8479f9-sr898\" (UID: \"f31fe79b-92dd-4237-b387-9d2c825fdacb\") " pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.441484 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cb8n5\" (UniqueName: 
\"kubernetes.io/projected/f31fe79b-92dd-4237-b387-9d2c825fdacb-kube-api-access-cb8n5\") pod \"dnsmasq-dns-5ccc8479f9-sr898\" (UID: \"f31fe79b-92dd-4237-b387-9d2c825fdacb\") " pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.459479 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-zqrtr"] Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.496177 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-pkbwk"] Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.497322 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.511051 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-pkbwk"] Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.525127 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.586664 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb0df889-52d3-4150-87df-82886662e952-config\") pod \"dnsmasq-dns-57d769cc4f-pkbwk\" (UID: \"cb0df889-52d3-4150-87df-82886662e952\") " pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.586704 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb0df889-52d3-4150-87df-82886662e952-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-pkbwk\" (UID: \"cb0df889-52d3-4150-87df-82886662e952\") " pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.586863 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7ptw\" (UniqueName: \"kubernetes.io/projected/cb0df889-52d3-4150-87df-82886662e952-kube-api-access-q7ptw\") pod \"dnsmasq-dns-57d769cc4f-pkbwk\" (UID: \"cb0df889-52d3-4150-87df-82886662e952\") " pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.688707 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7ptw\" (UniqueName: \"kubernetes.io/projected/cb0df889-52d3-4150-87df-82886662e952-kube-api-access-q7ptw\") pod \"dnsmasq-dns-57d769cc4f-pkbwk\" (UID: \"cb0df889-52d3-4150-87df-82886662e952\") " pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.688773 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb0df889-52d3-4150-87df-82886662e952-config\") pod \"dnsmasq-dns-57d769cc4f-pkbwk\" (UID: \"cb0df889-52d3-4150-87df-82886662e952\") " pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.688794 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb0df889-52d3-4150-87df-82886662e952-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-pkbwk\" (UID: \"cb0df889-52d3-4150-87df-82886662e952\") " pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.689652 4838 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb0df889-52d3-4150-87df-82886662e952-config\") pod \"dnsmasq-dns-57d769cc4f-pkbwk\" (UID: \"cb0df889-52d3-4150-87df-82886662e952\") " pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.689882 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb0df889-52d3-4150-87df-82886662e952-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-pkbwk\" (UID: \"cb0df889-52d3-4150-87df-82886662e952\") " pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.727880 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7ptw\" (UniqueName: \"kubernetes.io/projected/cb0df889-52d3-4150-87df-82886662e952-kube-api-access-q7ptw\") pod \"dnsmasq-dns-57d769cc4f-pkbwk\" (UID: \"cb0df889-52d3-4150-87df-82886662e952\") " pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" Nov 28 10:14:46 crc kubenswrapper[4838]: I1128 10:14:46.835981 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.042148 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-sr898"] Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.292295 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-pkbwk"] Nov 28 10:14:47 crc kubenswrapper[4838]: W1128 10:14:47.294301 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcb0df889_52d3_4150_87df_82886662e952.slice/crio-563eecb49fe4d744f840d52eb3ef9e64570800eb3a3a29ffddcc582d553bdbdf WatchSource:0}: Error finding container 563eecb49fe4d744f840d52eb3ef9e64570800eb3a3a29ffddcc582d553bdbdf: Status 404 returned error can't find the container with id 563eecb49fe4d744f840d52eb3ef9e64570800eb3a3a29ffddcc582d553bdbdf Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.352992 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.354610 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.358137 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.358336 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.358502 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.359936 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-87q8x" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.360046 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.360209 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.360320 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.365353 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.397908 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.397953 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dbe39b78-c198-480e-9bca-17eaed6183bf-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.397971 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skjvs\" (UniqueName: \"kubernetes.io/projected/dbe39b78-c198-480e-9bca-17eaed6183bf-kube-api-access-skjvs\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.397994 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.398053 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.398085 4838 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/dbe39b78-c198-480e-9bca-17eaed6183bf-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.398111 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/dbe39b78-c198-480e-9bca-17eaed6183bf-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.398134 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.398204 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/dbe39b78-c198-480e-9bca-17eaed6183bf-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.398226 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/dbe39b78-c198-480e-9bca-17eaed6183bf-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.398262 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.498993 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.499060 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skjvs\" (UniqueName: \"kubernetes.io/projected/dbe39b78-c198-480e-9bca-17eaed6183bf-kube-api-access-skjvs\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.499088 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dbe39b78-c198-480e-9bca-17eaed6183bf-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.499111 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.499134 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.499168 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/dbe39b78-c198-480e-9bca-17eaed6183bf-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.499188 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/dbe39b78-c198-480e-9bca-17eaed6183bf-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.499206 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.499251 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/dbe39b78-c198-480e-9bca-17eaed6183bf-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.499271 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/dbe39b78-c198-480e-9bca-17eaed6183bf-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.499299 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.499691 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.500115 4838 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"dbe39b78-c198-480e-9bca-17eaed6183bf\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.500667 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/dbe39b78-c198-480e-9bca-17eaed6183bf-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.500934 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.501565 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dbe39b78-c198-480e-9bca-17eaed6183bf-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.501802 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/dbe39b78-c198-480e-9bca-17eaed6183bf-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.504640 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/dbe39b78-c198-480e-9bca-17eaed6183bf-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.506323 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.506614 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/dbe39b78-c198-480e-9bca-17eaed6183bf-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.506874 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.517145 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skjvs\" (UniqueName: \"kubernetes.io/projected/dbe39b78-c198-480e-9bca-17eaed6183bf-kube-api-access-skjvs\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.520899 4838 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.664781 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.666487 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.670534 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.670554 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.670795 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.671341 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.671461 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-x97jh" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.671545 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.674769 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.678387 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.678642 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.703294 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/366c721a-0e79-44a0-aa02-761c4ddc6936-config-data\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.703366 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/366c721a-0e79-44a0-aa02-761c4ddc6936-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.703391 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.703431 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/366c721a-0e79-44a0-aa02-761c4ddc6936-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.703448 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.703532 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.703684 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/366c721a-0e79-44a0-aa02-761c4ddc6936-pod-info\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.703705 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4hqzv\" (UniqueName: \"kubernetes.io/projected/366c721a-0e79-44a0-aa02-761c4ddc6936-kube-api-access-4hqzv\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.703763 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/366c721a-0e79-44a0-aa02-761c4ddc6936-server-conf\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc 
kubenswrapper[4838]: I1128 10:14:47.703782 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.703917 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.805973 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/366c721a-0e79-44a0-aa02-761c4ddc6936-pod-info\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.806019 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4hqzv\" (UniqueName: \"kubernetes.io/projected/366c721a-0e79-44a0-aa02-761c4ddc6936-kube-api-access-4hqzv\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.806054 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/366c721a-0e79-44a0-aa02-761c4ddc6936-server-conf\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.806074 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.806092 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.806154 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/366c721a-0e79-44a0-aa02-761c4ddc6936-config-data\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.806177 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/366c721a-0e79-44a0-aa02-761c4ddc6936-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.806216 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.806235 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/366c721a-0e79-44a0-aa02-761c4ddc6936-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.806255 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.806302 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.807022 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.807163 4838 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.807979 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.808235 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/366c721a-0e79-44a0-aa02-761c4ddc6936-config-data\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.808674 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/366c721a-0e79-44a0-aa02-761c4ddc6936-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.809267 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/366c721a-0e79-44a0-aa02-761c4ddc6936-server-conf\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.810040 4838 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/366c721a-0e79-44a0-aa02-761c4ddc6936-pod-info\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.811806 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.814863 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.814892 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/366c721a-0e79-44a0-aa02-761c4ddc6936-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.825776 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4hqzv\" (UniqueName: \"kubernetes.io/projected/366c721a-0e79-44a0-aa02-761c4ddc6936-kube-api-access-4hqzv\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.840792 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " pod="openstack/rabbitmq-server-0" Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.926306 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" event={"ID":"f31fe79b-92dd-4237-b387-9d2c825fdacb","Type":"ContainerStarted","Data":"5fafbe3dbf0e4f1b139ce9f8f8ca0fd799088853671118eefe5057ebc272f41c"} Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.929017 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" event={"ID":"cb0df889-52d3-4150-87df-82886662e952","Type":"ContainerStarted","Data":"563eecb49fe4d744f840d52eb3ef9e64570800eb3a3a29ffddcc582d553bdbdf"} Nov 28 10:14:47 crc kubenswrapper[4838]: I1128 10:14:47.986668 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 10:14:48 crc kubenswrapper[4838]: I1128 10:14:48.120815 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 10:14:48 crc kubenswrapper[4838]: I1128 10:14:48.536552 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 10:14:48 crc kubenswrapper[4838]: W1128 10:14:48.559862 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod366c721a_0e79_44a0_aa02_761c4ddc6936.slice/crio-249fc1438860fe7409049c7d24ce60590ae19b8902705d578708b84bba9c9aba WatchSource:0}: Error finding container 249fc1438860fe7409049c7d24ce60590ae19b8902705d578708b84bba9c9aba: Status 404 returned error can't find the container with id 249fc1438860fe7409049c7d24ce60590ae19b8902705d578708b84bba9c9aba Nov 28 10:14:48 crc kubenswrapper[4838]: I1128 10:14:48.939875 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"dbe39b78-c198-480e-9bca-17eaed6183bf","Type":"ContainerStarted","Data":"d0b19a4e964ccfcc6a584d94a03552ce077687841c08299a7d1d4c655f6ca93b"} Nov 28 10:14:48 crc kubenswrapper[4838]: I1128 10:14:48.941050 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"366c721a-0e79-44a0-aa02-761c4ddc6936","Type":"ContainerStarted","Data":"249fc1438860fe7409049c7d24ce60590ae19b8902705d578708b84bba9c9aba"} Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.191178 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.192461 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.195903 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.196047 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.196400 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.196466 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-bg7kb" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.199877 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.200257 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.238632 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/55cfa883-d16d-4231-95e7-fd0b3ad9b702-config-data-default\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.238697 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/55cfa883-d16d-4231-95e7-fd0b3ad9b702-config-data-generated\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.238879 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/55cfa883-d16d-4231-95e7-fd0b3ad9b702-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.238918 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55cfa883-d16d-4231-95e7-fd0b3ad9b702-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.239161 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/55cfa883-d16d-4231-95e7-fd0b3ad9b702-kolla-config\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.239252 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55cfa883-d16d-4231-95e7-fd0b3ad9b702-operator-scripts\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.239284 4838 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.239310 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4fbv\" (UniqueName: \"kubernetes.io/projected/55cfa883-d16d-4231-95e7-fd0b3ad9b702-kube-api-access-q4fbv\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.340987 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/55cfa883-d16d-4231-95e7-fd0b3ad9b702-config-data-generated\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.341034 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/55cfa883-d16d-4231-95e7-fd0b3ad9b702-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.341063 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55cfa883-d16d-4231-95e7-fd0b3ad9b702-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.341116 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/55cfa883-d16d-4231-95e7-fd0b3ad9b702-kolla-config\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.341153 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55cfa883-d16d-4231-95e7-fd0b3ad9b702-operator-scripts\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.341172 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.341197 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4fbv\" (UniqueName: \"kubernetes.io/projected/55cfa883-d16d-4231-95e7-fd0b3ad9b702-kube-api-access-q4fbv\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.341216 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/55cfa883-d16d-4231-95e7-fd0b3ad9b702-config-data-default\") pod 
\"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.341440 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/55cfa883-d16d-4231-95e7-fd0b3ad9b702-config-data-generated\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.342332 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/55cfa883-d16d-4231-95e7-fd0b3ad9b702-config-data-default\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.342572 4838 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.342771 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/55cfa883-d16d-4231-95e7-fd0b3ad9b702-kolla-config\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.343479 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55cfa883-d16d-4231-95e7-fd0b3ad9b702-operator-scripts\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.363610 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55cfa883-d16d-4231-95e7-fd0b3ad9b702-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.367936 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.368234 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4fbv\" (UniqueName: \"kubernetes.io/projected/55cfa883-d16d-4231-95e7-fd0b3ad9b702-kube-api-access-q4fbv\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.377012 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/55cfa883-d16d-4231-95e7-fd0b3ad9b702-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"55cfa883-d16d-4231-95e7-fd0b3ad9b702\") " pod="openstack/openstack-galera-0" Nov 28 10:14:49 crc kubenswrapper[4838]: I1128 10:14:49.518302 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.503879 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.508811 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.510173 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.511241 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.512200 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.512426 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-df8m5" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.518263 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.662896 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/061baebe-5a1a-4090-a396-84571f88b105-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.662942 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/061baebe-5a1a-4090-a396-84571f88b105-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.662987 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.663984 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/061baebe-5a1a-4090-a396-84571f88b105-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.664049 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krfqf\" (UniqueName: \"kubernetes.io/projected/061baebe-5a1a-4090-a396-84571f88b105-kube-api-access-krfqf\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.664081 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/061baebe-5a1a-4090-a396-84571f88b105-combined-ca-bundle\") pod 
\"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.664101 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/061baebe-5a1a-4090-a396-84571f88b105-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.664116 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/061baebe-5a1a-4090-a396-84571f88b105-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.669805 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.670635 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.677769 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-xh2rt" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.677974 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.678110 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.694629 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.765747 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.766008 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/061baebe-5a1a-4090-a396-84571f88b105-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.766045 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krfqf\" (UniqueName: \"kubernetes.io/projected/061baebe-5a1a-4090-a396-84571f88b105-kube-api-access-krfqf\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.766072 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/061baebe-5a1a-4090-a396-84571f88b105-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.766089 4838 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/061baebe-5a1a-4090-a396-84571f88b105-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.766107 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/061baebe-5a1a-4090-a396-84571f88b105-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.766132 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/061baebe-5a1a-4090-a396-84571f88b105-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.766153 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/061baebe-5a1a-4090-a396-84571f88b105-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.766417 4838 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.766787 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/061baebe-5a1a-4090-a396-84571f88b105-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.768015 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/061baebe-5a1a-4090-a396-84571f88b105-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.771412 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/061baebe-5a1a-4090-a396-84571f88b105-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.771476 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/061baebe-5a1a-4090-a396-84571f88b105-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.776942 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/061baebe-5a1a-4090-a396-84571f88b105-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.779759 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/061baebe-5a1a-4090-a396-84571f88b105-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.784387 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krfqf\" (UniqueName: \"kubernetes.io/projected/061baebe-5a1a-4090-a396-84571f88b105-kube-api-access-krfqf\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.798382 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"061baebe-5a1a-4090-a396-84571f88b105\") " pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.833018 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.867455 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/09948818-c683-4cab-ab8e-c4bfa54809a1-kolla-config\") pod \"memcached-0\" (UID: \"09948818-c683-4cab-ab8e-c4bfa54809a1\") " pod="openstack/memcached-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.867761 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/09948818-c683-4cab-ab8e-c4bfa54809a1-config-data\") pod \"memcached-0\" (UID: \"09948818-c683-4cab-ab8e-c4bfa54809a1\") " pod="openstack/memcached-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.867891 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/09948818-c683-4cab-ab8e-c4bfa54809a1-memcached-tls-certs\") pod \"memcached-0\" (UID: \"09948818-c683-4cab-ab8e-c4bfa54809a1\") " pod="openstack/memcached-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.868011 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjj2w\" (UniqueName: \"kubernetes.io/projected/09948818-c683-4cab-ab8e-c4bfa54809a1-kube-api-access-pjj2w\") pod \"memcached-0\" (UID: \"09948818-c683-4cab-ab8e-c4bfa54809a1\") " pod="openstack/memcached-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.868152 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09948818-c683-4cab-ab8e-c4bfa54809a1-combined-ca-bundle\") pod \"memcached-0\" (UID: \"09948818-c683-4cab-ab8e-c4bfa54809a1\") " pod="openstack/memcached-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.969001 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/09948818-c683-4cab-ab8e-c4bfa54809a1-memcached-tls-certs\") pod \"memcached-0\" (UID: \"09948818-c683-4cab-ab8e-c4bfa54809a1\") " pod="openstack/memcached-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.969082 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjj2w\" (UniqueName: \"kubernetes.io/projected/09948818-c683-4cab-ab8e-c4bfa54809a1-kube-api-access-pjj2w\") pod \"memcached-0\" (UID: \"09948818-c683-4cab-ab8e-c4bfa54809a1\") " pod="openstack/memcached-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.969192 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09948818-c683-4cab-ab8e-c4bfa54809a1-combined-ca-bundle\") pod \"memcached-0\" (UID: \"09948818-c683-4cab-ab8e-c4bfa54809a1\") " pod="openstack/memcached-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.969236 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/09948818-c683-4cab-ab8e-c4bfa54809a1-kolla-config\") pod \"memcached-0\" (UID: \"09948818-c683-4cab-ab8e-c4bfa54809a1\") " pod="openstack/memcached-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.969274 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/09948818-c683-4cab-ab8e-c4bfa54809a1-config-data\") pod \"memcached-0\" (UID: \"09948818-c683-4cab-ab8e-c4bfa54809a1\") " pod="openstack/memcached-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.970239 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/09948818-c683-4cab-ab8e-c4bfa54809a1-config-data\") pod \"memcached-0\" (UID: \"09948818-c683-4cab-ab8e-c4bfa54809a1\") " pod="openstack/memcached-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.970505 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/09948818-c683-4cab-ab8e-c4bfa54809a1-kolla-config\") pod \"memcached-0\" (UID: \"09948818-c683-4cab-ab8e-c4bfa54809a1\") " pod="openstack/memcached-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.972538 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/09948818-c683-4cab-ab8e-c4bfa54809a1-memcached-tls-certs\") pod \"memcached-0\" (UID: \"09948818-c683-4cab-ab8e-c4bfa54809a1\") " pod="openstack/memcached-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.974200 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09948818-c683-4cab-ab8e-c4bfa54809a1-combined-ca-bundle\") pod \"memcached-0\" (UID: \"09948818-c683-4cab-ab8e-c4bfa54809a1\") " pod="openstack/memcached-0" Nov 28 10:14:50 crc kubenswrapper[4838]: I1128 10:14:50.983812 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjj2w\" (UniqueName: \"kubernetes.io/projected/09948818-c683-4cab-ab8e-c4bfa54809a1-kube-api-access-pjj2w\") pod \"memcached-0\" (UID: \"09948818-c683-4cab-ab8e-c4bfa54809a1\") " pod="openstack/memcached-0" Nov 28 10:14:51 crc kubenswrapper[4838]: I1128 10:14:51.028539 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 28 10:14:52 crc kubenswrapper[4838]: I1128 10:14:52.763366 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 10:14:52 crc kubenswrapper[4838]: I1128 10:14:52.764361 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 10:14:52 crc kubenswrapper[4838]: I1128 10:14:52.769029 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-fgzt6" Nov 28 10:14:52 crc kubenswrapper[4838]: I1128 10:14:52.781792 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 10:14:52 crc kubenswrapper[4838]: I1128 10:14:52.811345 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qd8rc\" (UniqueName: \"kubernetes.io/projected/229a7c23-7909-4e77-bfa9-92d7d4f0f0eb-kube-api-access-qd8rc\") pod \"kube-state-metrics-0\" (UID: \"229a7c23-7909-4e77-bfa9-92d7d4f0f0eb\") " pod="openstack/kube-state-metrics-0" Nov 28 10:14:52 crc kubenswrapper[4838]: I1128 10:14:52.913088 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qd8rc\" (UniqueName: \"kubernetes.io/projected/229a7c23-7909-4e77-bfa9-92d7d4f0f0eb-kube-api-access-qd8rc\") pod \"kube-state-metrics-0\" (UID: \"229a7c23-7909-4e77-bfa9-92d7d4f0f0eb\") " pod="openstack/kube-state-metrics-0" Nov 28 10:14:52 crc kubenswrapper[4838]: I1128 10:14:52.929603 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qd8rc\" (UniqueName: \"kubernetes.io/projected/229a7c23-7909-4e77-bfa9-92d7d4f0f0eb-kube-api-access-qd8rc\") pod \"kube-state-metrics-0\" (UID: \"229a7c23-7909-4e77-bfa9-92d7d4f0f0eb\") " pod="openstack/kube-state-metrics-0" Nov 28 10:14:53 crc kubenswrapper[4838]: I1128 10:14:53.098461 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 10:14:53 crc kubenswrapper[4838]: I1128 10:14:53.940689 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:14:53 crc kubenswrapper[4838]: I1128 10:14:53.940802 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.435873 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.437359 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.440897 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.440950 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.440975 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.441135 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.444706 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-hsn89" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.464447 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.586142 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dslx4\" (UniqueName: \"kubernetes.io/projected/20476ab2-8070-42b3-a05c-d2c07c111ea9-kube-api-access-dslx4\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.586217 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/20476ab2-8070-42b3-a05c-d2c07c111ea9-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.586281 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/20476ab2-8070-42b3-a05c-d2c07c111ea9-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.586383 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20476ab2-8070-42b3-a05c-d2c07c111ea9-config\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.586458 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20476ab2-8070-42b3-a05c-d2c07c111ea9-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.586562 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.586613 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/20476ab2-8070-42b3-a05c-d2c07c111ea9-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.586666 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/20476ab2-8070-42b3-a05c-d2c07c111ea9-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.688627 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dslx4\" (UniqueName: \"kubernetes.io/projected/20476ab2-8070-42b3-a05c-d2c07c111ea9-kube-api-access-dslx4\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.688681 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/20476ab2-8070-42b3-a05c-d2c07c111ea9-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.688711 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/20476ab2-8070-42b3-a05c-d2c07c111ea9-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.688756 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20476ab2-8070-42b3-a05c-d2c07c111ea9-config\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.688786 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20476ab2-8070-42b3-a05c-d2c07c111ea9-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.688825 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.688860 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/20476ab2-8070-42b3-a05c-d2c07c111ea9-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.688884 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/20476ab2-8070-42b3-a05c-d2c07c111ea9-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 
10:14:56.689608 4838 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.689884 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/20476ab2-8070-42b3-a05c-d2c07c111ea9-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.691100 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20476ab2-8070-42b3-a05c-d2c07c111ea9-config\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.691340 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/20476ab2-8070-42b3-a05c-d2c07c111ea9-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.700059 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20476ab2-8070-42b3-a05c-d2c07c111ea9-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.709309 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/20476ab2-8070-42b3-a05c-d2c07c111ea9-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.711147 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dslx4\" (UniqueName: \"kubernetes.io/projected/20476ab2-8070-42b3-a05c-d2c07c111ea9-kube-api-access-dslx4\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.715419 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/20476ab2-8070-42b3-a05c-d2c07c111ea9-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.718470 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-nb-0\" (UID: \"20476ab2-8070-42b3-a05c-d2c07c111ea9\") " pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:56 crc kubenswrapper[4838]: I1128 10:14:56.769189 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.047187 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-dqjd8"] Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.048393 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.051209 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.051355 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.052825 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-zt4fz"] Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.053169 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-25k9p" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.054784 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.061788 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-dqjd8"] Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.080564 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-zt4fz"] Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.200109 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/50cdff0a-cfe5-41e1-8eed-67b23079335f-var-log-ovn\") pod \"ovn-controller-dqjd8\" (UID: \"50cdff0a-cfe5-41e1-8eed-67b23079335f\") " pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.200171 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/50cdff0a-cfe5-41e1-8eed-67b23079335f-var-run\") pod \"ovn-controller-dqjd8\" (UID: \"50cdff0a-cfe5-41e1-8eed-67b23079335f\") " pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.200299 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4cb6513a-e07a-40b9-a3ad-f147b8b4a96d-scripts\") pod \"ovn-controller-ovs-zt4fz\" (UID: \"4cb6513a-e07a-40b9-a3ad-f147b8b4a96d\") " pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.200332 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2nfw\" (UniqueName: \"kubernetes.io/projected/4cb6513a-e07a-40b9-a3ad-f147b8b4a96d-kube-api-access-m2nfw\") pod \"ovn-controller-ovs-zt4fz\" (UID: \"4cb6513a-e07a-40b9-a3ad-f147b8b4a96d\") " pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.200365 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/4cb6513a-e07a-40b9-a3ad-f147b8b4a96d-var-lib\") pod \"ovn-controller-ovs-zt4fz\" (UID: \"4cb6513a-e07a-40b9-a3ad-f147b8b4a96d\") " pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:14:57 crc 
kubenswrapper[4838]: I1128 10:14:57.200423 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6v9q\" (UniqueName: \"kubernetes.io/projected/50cdff0a-cfe5-41e1-8eed-67b23079335f-kube-api-access-k6v9q\") pod \"ovn-controller-dqjd8\" (UID: \"50cdff0a-cfe5-41e1-8eed-67b23079335f\") " pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.200478 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/50cdff0a-cfe5-41e1-8eed-67b23079335f-ovn-controller-tls-certs\") pod \"ovn-controller-dqjd8\" (UID: \"50cdff0a-cfe5-41e1-8eed-67b23079335f\") " pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.200513 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/50cdff0a-cfe5-41e1-8eed-67b23079335f-scripts\") pod \"ovn-controller-dqjd8\" (UID: \"50cdff0a-cfe5-41e1-8eed-67b23079335f\") " pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.200544 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/4cb6513a-e07a-40b9-a3ad-f147b8b4a96d-var-log\") pod \"ovn-controller-ovs-zt4fz\" (UID: \"4cb6513a-e07a-40b9-a3ad-f147b8b4a96d\") " pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.200600 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4cb6513a-e07a-40b9-a3ad-f147b8b4a96d-var-run\") pod \"ovn-controller-ovs-zt4fz\" (UID: \"4cb6513a-e07a-40b9-a3ad-f147b8b4a96d\") " pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.200632 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/50cdff0a-cfe5-41e1-8eed-67b23079335f-var-run-ovn\") pod \"ovn-controller-dqjd8\" (UID: \"50cdff0a-cfe5-41e1-8eed-67b23079335f\") " pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.200661 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50cdff0a-cfe5-41e1-8eed-67b23079335f-combined-ca-bundle\") pod \"ovn-controller-dqjd8\" (UID: \"50cdff0a-cfe5-41e1-8eed-67b23079335f\") " pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.200681 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/4cb6513a-e07a-40b9-a3ad-f147b8b4a96d-etc-ovs\") pod \"ovn-controller-ovs-zt4fz\" (UID: \"4cb6513a-e07a-40b9-a3ad-f147b8b4a96d\") " pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.301654 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4cb6513a-e07a-40b9-a3ad-f147b8b4a96d-scripts\") pod \"ovn-controller-ovs-zt4fz\" (UID: \"4cb6513a-e07a-40b9-a3ad-f147b8b4a96d\") " pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.301692 4838 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2nfw\" (UniqueName: \"kubernetes.io/projected/4cb6513a-e07a-40b9-a3ad-f147b8b4a96d-kube-api-access-m2nfw\") pod \"ovn-controller-ovs-zt4fz\" (UID: \"4cb6513a-e07a-40b9-a3ad-f147b8b4a96d\") " pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.301735 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/4cb6513a-e07a-40b9-a3ad-f147b8b4a96d-var-lib\") pod \"ovn-controller-ovs-zt4fz\" (UID: \"4cb6513a-e07a-40b9-a3ad-f147b8b4a96d\") " pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.301760 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6v9q\" (UniqueName: \"kubernetes.io/projected/50cdff0a-cfe5-41e1-8eed-67b23079335f-kube-api-access-k6v9q\") pod \"ovn-controller-dqjd8\" (UID: \"50cdff0a-cfe5-41e1-8eed-67b23079335f\") " pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.301786 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/50cdff0a-cfe5-41e1-8eed-67b23079335f-ovn-controller-tls-certs\") pod \"ovn-controller-dqjd8\" (UID: \"50cdff0a-cfe5-41e1-8eed-67b23079335f\") " pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.301819 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/50cdff0a-cfe5-41e1-8eed-67b23079335f-scripts\") pod \"ovn-controller-dqjd8\" (UID: \"50cdff0a-cfe5-41e1-8eed-67b23079335f\") " pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.301850 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/4cb6513a-e07a-40b9-a3ad-f147b8b4a96d-var-log\") pod \"ovn-controller-ovs-zt4fz\" (UID: \"4cb6513a-e07a-40b9-a3ad-f147b8b4a96d\") " pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.301872 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4cb6513a-e07a-40b9-a3ad-f147b8b4a96d-var-run\") pod \"ovn-controller-ovs-zt4fz\" (UID: \"4cb6513a-e07a-40b9-a3ad-f147b8b4a96d\") " pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.301896 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/50cdff0a-cfe5-41e1-8eed-67b23079335f-var-run-ovn\") pod \"ovn-controller-dqjd8\" (UID: \"50cdff0a-cfe5-41e1-8eed-67b23079335f\") " pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.301922 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50cdff0a-cfe5-41e1-8eed-67b23079335f-combined-ca-bundle\") pod \"ovn-controller-dqjd8\" (UID: \"50cdff0a-cfe5-41e1-8eed-67b23079335f\") " pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.301940 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/4cb6513a-e07a-40b9-a3ad-f147b8b4a96d-etc-ovs\") 
pod \"ovn-controller-ovs-zt4fz\" (UID: \"4cb6513a-e07a-40b9-a3ad-f147b8b4a96d\") " pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.301988 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/50cdff0a-cfe5-41e1-8eed-67b23079335f-var-log-ovn\") pod \"ovn-controller-dqjd8\" (UID: \"50cdff0a-cfe5-41e1-8eed-67b23079335f\") " pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.302013 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/50cdff0a-cfe5-41e1-8eed-67b23079335f-var-run\") pod \"ovn-controller-dqjd8\" (UID: \"50cdff0a-cfe5-41e1-8eed-67b23079335f\") " pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.302344 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/4cb6513a-e07a-40b9-a3ad-f147b8b4a96d-var-lib\") pod \"ovn-controller-ovs-zt4fz\" (UID: \"4cb6513a-e07a-40b9-a3ad-f147b8b4a96d\") " pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.302400 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/50cdff0a-cfe5-41e1-8eed-67b23079335f-var-run\") pod \"ovn-controller-dqjd8\" (UID: \"50cdff0a-cfe5-41e1-8eed-67b23079335f\") " pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.302426 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4cb6513a-e07a-40b9-a3ad-f147b8b4a96d-var-run\") pod \"ovn-controller-ovs-zt4fz\" (UID: \"4cb6513a-e07a-40b9-a3ad-f147b8b4a96d\") " pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.302460 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/50cdff0a-cfe5-41e1-8eed-67b23079335f-var-run-ovn\") pod \"ovn-controller-dqjd8\" (UID: \"50cdff0a-cfe5-41e1-8eed-67b23079335f\") " pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.302467 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/4cb6513a-e07a-40b9-a3ad-f147b8b4a96d-var-log\") pod \"ovn-controller-ovs-zt4fz\" (UID: \"4cb6513a-e07a-40b9-a3ad-f147b8b4a96d\") " pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.302521 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/4cb6513a-e07a-40b9-a3ad-f147b8b4a96d-etc-ovs\") pod \"ovn-controller-ovs-zt4fz\" (UID: \"4cb6513a-e07a-40b9-a3ad-f147b8b4a96d\") " pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.302595 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/50cdff0a-cfe5-41e1-8eed-67b23079335f-var-log-ovn\") pod \"ovn-controller-dqjd8\" (UID: \"50cdff0a-cfe5-41e1-8eed-67b23079335f\") " pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.304211 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/50cdff0a-cfe5-41e1-8eed-67b23079335f-scripts\") pod \"ovn-controller-dqjd8\" (UID: \"50cdff0a-cfe5-41e1-8eed-67b23079335f\") " pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.304756 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4cb6513a-e07a-40b9-a3ad-f147b8b4a96d-scripts\") pod \"ovn-controller-ovs-zt4fz\" (UID: \"4cb6513a-e07a-40b9-a3ad-f147b8b4a96d\") " pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.305447 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50cdff0a-cfe5-41e1-8eed-67b23079335f-combined-ca-bundle\") pod \"ovn-controller-dqjd8\" (UID: \"50cdff0a-cfe5-41e1-8eed-67b23079335f\") " pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.306187 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/50cdff0a-cfe5-41e1-8eed-67b23079335f-ovn-controller-tls-certs\") pod \"ovn-controller-dqjd8\" (UID: \"50cdff0a-cfe5-41e1-8eed-67b23079335f\") " pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.319335 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2nfw\" (UniqueName: \"kubernetes.io/projected/4cb6513a-e07a-40b9-a3ad-f147b8b4a96d-kube-api-access-m2nfw\") pod \"ovn-controller-ovs-zt4fz\" (UID: \"4cb6513a-e07a-40b9-a3ad-f147b8b4a96d\") " pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.330692 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6v9q\" (UniqueName: \"kubernetes.io/projected/50cdff0a-cfe5-41e1-8eed-67b23079335f-kube-api-access-k6v9q\") pod \"ovn-controller-dqjd8\" (UID: \"50cdff0a-cfe5-41e1-8eed-67b23079335f\") " pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.450687 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:14:57 crc kubenswrapper[4838]: I1128 10:14:57.450935 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-dqjd8" Nov 28 10:14:59 crc kubenswrapper[4838]: I1128 10:14:59.979904 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 10:14:59 crc kubenswrapper[4838]: I1128 10:14:59.981402 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 10:14:59 crc kubenswrapper[4838]: I1128 10:14:59.987414 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-drlgh" Nov 28 10:14:59 crc kubenswrapper[4838]: I1128 10:14:59.987696 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 28 10:14:59 crc kubenswrapper[4838]: I1128 10:14:59.987740 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 28 10:14:59 crc kubenswrapper[4838]: I1128 10:14:59.987795 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.004876 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.090278 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c2037801-5d52-402d-9d8c-4b17928fb33a-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.090345 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c2037801-5d52-402d-9d8c-4b17928fb33a-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.090373 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d84hk\" (UniqueName: \"kubernetes.io/projected/c2037801-5d52-402d-9d8c-4b17928fb33a-kube-api-access-d84hk\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.090569 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2037801-5d52-402d-9d8c-4b17928fb33a-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.090685 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c2037801-5d52-402d-9d8c-4b17928fb33a-config\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.090729 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2037801-5d52-402d-9d8c-4b17928fb33a-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.090807 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " 
pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.090849 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2037801-5d52-402d-9d8c-4b17928fb33a-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.153583 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405415-t4j6r"] Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.154992 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405415-t4j6r" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.157941 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.162137 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.167045 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405415-t4j6r"] Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.192290 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c2037801-5d52-402d-9d8c-4b17928fb33a-config\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.192344 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2037801-5d52-402d-9d8c-4b17928fb33a-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.192401 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.192443 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2037801-5d52-402d-9d8c-4b17928fb33a-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.192488 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c2037801-5d52-402d-9d8c-4b17928fb33a-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.192519 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c2037801-5d52-402d-9d8c-4b17928fb33a-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " 
pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.192546 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d84hk\" (UniqueName: \"kubernetes.io/projected/c2037801-5d52-402d-9d8c-4b17928fb33a-kube-api-access-d84hk\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.192614 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2037801-5d52-402d-9d8c-4b17928fb33a-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.193032 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c2037801-5d52-402d-9d8c-4b17928fb33a-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.193174 4838 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.193681 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c2037801-5d52-402d-9d8c-4b17928fb33a-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.193942 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c2037801-5d52-402d-9d8c-4b17928fb33a-config\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.198409 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2037801-5d52-402d-9d8c-4b17928fb33a-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.198885 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2037801-5d52-402d-9d8c-4b17928fb33a-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.201153 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2037801-5d52-402d-9d8c-4b17928fb33a-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.211268 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d84hk\" (UniqueName: 
\"kubernetes.io/projected/c2037801-5d52-402d-9d8c-4b17928fb33a-kube-api-access-d84hk\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.219613 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"c2037801-5d52-402d-9d8c-4b17928fb33a\") " pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.293822 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/996c85bb-be41-4eaa-9e6e-a912a0a3db0d-config-volume\") pod \"collect-profiles-29405415-t4j6r\" (UID: \"996c85bb-be41-4eaa-9e6e-a912a0a3db0d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405415-t4j6r" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.293883 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mv2g\" (UniqueName: \"kubernetes.io/projected/996c85bb-be41-4eaa-9e6e-a912a0a3db0d-kube-api-access-9mv2g\") pod \"collect-profiles-29405415-t4j6r\" (UID: \"996c85bb-be41-4eaa-9e6e-a912a0a3db0d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405415-t4j6r" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.293921 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/996c85bb-be41-4eaa-9e6e-a912a0a3db0d-secret-volume\") pod \"collect-profiles-29405415-t4j6r\" (UID: \"996c85bb-be41-4eaa-9e6e-a912a0a3db0d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405415-t4j6r" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.314673 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.395509 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/996c85bb-be41-4eaa-9e6e-a912a0a3db0d-config-volume\") pod \"collect-profiles-29405415-t4j6r\" (UID: \"996c85bb-be41-4eaa-9e6e-a912a0a3db0d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405415-t4j6r" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.395567 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mv2g\" (UniqueName: \"kubernetes.io/projected/996c85bb-be41-4eaa-9e6e-a912a0a3db0d-kube-api-access-9mv2g\") pod \"collect-profiles-29405415-t4j6r\" (UID: \"996c85bb-be41-4eaa-9e6e-a912a0a3db0d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405415-t4j6r" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.395608 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/996c85bb-be41-4eaa-9e6e-a912a0a3db0d-secret-volume\") pod \"collect-profiles-29405415-t4j6r\" (UID: \"996c85bb-be41-4eaa-9e6e-a912a0a3db0d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405415-t4j6r" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.396892 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/996c85bb-be41-4eaa-9e6e-a912a0a3db0d-config-volume\") pod \"collect-profiles-29405415-t4j6r\" (UID: \"996c85bb-be41-4eaa-9e6e-a912a0a3db0d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405415-t4j6r" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.399146 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/996c85bb-be41-4eaa-9e6e-a912a0a3db0d-secret-volume\") pod \"collect-profiles-29405415-t4j6r\" (UID: \"996c85bb-be41-4eaa-9e6e-a912a0a3db0d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405415-t4j6r" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.411744 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mv2g\" (UniqueName: \"kubernetes.io/projected/996c85bb-be41-4eaa-9e6e-a912a0a3db0d-kube-api-access-9mv2g\") pod \"collect-profiles-29405415-t4j6r\" (UID: \"996c85bb-be41-4eaa-9e6e-a912a0a3db0d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405415-t4j6r" Nov 28 10:15:00 crc kubenswrapper[4838]: I1128 10:15:00.474453 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405415-t4j6r" Nov 28 10:15:08 crc kubenswrapper[4838]: E1128 10:15:08.749042 4838 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 28 10:15:08 crc kubenswrapper[4838]: E1128 10:15:08.749955 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4hqzv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(366c721a-0e79-44a0-aa02-761c4ddc6936): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 10:15:08 crc kubenswrapper[4838]: E1128 10:15:08.751265 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context 
canceled\"" pod="openstack/rabbitmq-server-0" podUID="366c721a-0e79-44a0-aa02-761c4ddc6936" Nov 28 10:15:08 crc kubenswrapper[4838]: E1128 10:15:08.783025 4838 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 28 10:15:08 crc kubenswrapper[4838]: E1128 10:15:08.783237 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-skjvs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(dbe39b78-c198-480e-9bca-17eaed6183bf): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 10:15:08 crc kubenswrapper[4838]: E1128 10:15:08.784400 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context 
canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="dbe39b78-c198-480e-9bca-17eaed6183bf" Nov 28 10:15:09 crc kubenswrapper[4838]: E1128 10:15:09.104823 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="dbe39b78-c198-480e-9bca-17eaed6183bf" Nov 28 10:15:09 crc kubenswrapper[4838]: E1128 10:15:09.105553 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="366c721a-0e79-44a0-aa02-761c4ddc6936" Nov 28 10:15:09 crc kubenswrapper[4838]: E1128 10:15:09.635101 4838 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 10:15:09 crc kubenswrapper[4838]: E1128 10:15:09.635262 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nfdh5dfhb6h64h676hc4h78h97h669h54chfbh696hb5h54bh5d4h6bh64h644h677h584h5cbh698h9dh5bbh5f8h5b8hcdh644h5c7h694hbfh589q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cb8n5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-5ccc8479f9-sr898_openstack(f31fe79b-92dd-4237-b387-9d2c825fdacb): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" 
logger="UnhandledError" Nov 28 10:15:09 crc kubenswrapper[4838]: E1128 10:15:09.636320 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" podUID="f31fe79b-92dd-4237-b387-9d2c825fdacb" Nov 28 10:15:09 crc kubenswrapper[4838]: E1128 10:15:09.638499 4838 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 10:15:09 crc kubenswrapper[4838]: E1128 10:15:09.638706 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nt6mq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-zqrtr_openstack(8d508c37-bed9-4481-b086-0159800059e5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 10:15:09 crc kubenswrapper[4838]: E1128 10:15:09.639969 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-zqrtr" podUID="8d508c37-bed9-4481-b086-0159800059e5" Nov 28 10:15:09 crc kubenswrapper[4838]: E1128 10:15:09.669941 4838 log.go:32] "PullImage from image service failed" err="rpc error: code = 
Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 10:15:09 crc kubenswrapper[4838]: E1128 10:15:09.669965 4838 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 10:15:09 crc kubenswrapper[4838]: E1128 10:15:09.670161 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-q7ptw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-pkbwk_openstack(cb0df889-52d3-4150-87df-82886662e952): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 10:15:09 crc kubenswrapper[4838]: E1128 10:15:09.670197 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qkmrt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-qrzr7_openstack(5d83d658-68c9-4bad-a05b-221d397b88ec): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 10:15:09 crc kubenswrapper[4838]: E1128 10:15:09.671466 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-qrzr7" podUID="5d83d658-68c9-4bad-a05b-221d397b88ec" Nov 28 10:15:09 crc kubenswrapper[4838]: E1128 10:15:09.671492 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" podUID="cb0df889-52d3-4150-87df-82886662e952" Nov 28 10:15:10 crc kubenswrapper[4838]: E1128 10:15:10.112222 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" podUID="f31fe79b-92dd-4237-b387-9d2c825fdacb" Nov 28 10:15:10 crc kubenswrapper[4838]: E1128 10:15:10.112836 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" podUID="cb0df889-52d3-4150-87df-82886662e952" Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.173311 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.198628 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 10:15:10 crc 
kubenswrapper[4838]: W1128 10:15:10.223880 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55cfa883_d16d_4231_95e7_fd0b3ad9b702.slice/crio-a26b7e76b2cd58997778d7c102971838ad18db2126e7cae52e2ebafdbf43807a WatchSource:0}: Error finding container a26b7e76b2cd58997778d7c102971838ad18db2126e7cae52e2ebafdbf43807a: Status 404 returned error can't find the container with id a26b7e76b2cd58997778d7c102971838ad18db2126e7cae52e2ebafdbf43807a Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.266843 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-dqjd8"] Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.276669 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405415-t4j6r"] Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.417585 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.609661 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-zqrtr" Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.614986 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-qrzr7" Nov 28 10:15:10 crc kubenswrapper[4838]: W1128 10:15:10.647262 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod061baebe_5a1a_4090_a396_84571f88b105.slice/crio-28d65b10f09121656d5bc82111248ec31172aaa37dc76188b3dffeb078310ac9 WatchSource:0}: Error finding container 28d65b10f09121656d5bc82111248ec31172aaa37dc76188b3dffeb078310ac9: Status 404 returned error can't find the container with id 28d65b10f09121656d5bc82111248ec31172aaa37dc76188b3dffeb078310ac9 Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.647643 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.703821 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.773500 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.795276 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d508c37-bed9-4481-b086-0159800059e5-config\") pod \"8d508c37-bed9-4481-b086-0159800059e5\" (UID: \"8d508c37-bed9-4481-b086-0159800059e5\") " Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.795329 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nt6mq\" (UniqueName: \"kubernetes.io/projected/8d508c37-bed9-4481-b086-0159800059e5-kube-api-access-nt6mq\") pod \"8d508c37-bed9-4481-b086-0159800059e5\" (UID: \"8d508c37-bed9-4481-b086-0159800059e5\") " Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.795374 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d508c37-bed9-4481-b086-0159800059e5-dns-svc\") pod \"8d508c37-bed9-4481-b086-0159800059e5\" (UID: \"8d508c37-bed9-4481-b086-0159800059e5\") " Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.795407 4838 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-qkmrt\" (UniqueName: \"kubernetes.io/projected/5d83d658-68c9-4bad-a05b-221d397b88ec-kube-api-access-qkmrt\") pod \"5d83d658-68c9-4bad-a05b-221d397b88ec\" (UID: \"5d83d658-68c9-4bad-a05b-221d397b88ec\") " Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.795445 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5d83d658-68c9-4bad-a05b-221d397b88ec-config\") pod \"5d83d658-68c9-4bad-a05b-221d397b88ec\" (UID: \"5d83d658-68c9-4bad-a05b-221d397b88ec\") " Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.795820 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d508c37-bed9-4481-b086-0159800059e5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8d508c37-bed9-4481-b086-0159800059e5" (UID: "8d508c37-bed9-4481-b086-0159800059e5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.795875 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d508c37-bed9-4481-b086-0159800059e5-config" (OuterVolumeSpecName: "config") pod "8d508c37-bed9-4481-b086-0159800059e5" (UID: "8d508c37-bed9-4481-b086-0159800059e5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.796179 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d83d658-68c9-4bad-a05b-221d397b88ec-config" (OuterVolumeSpecName: "config") pod "5d83d658-68c9-4bad-a05b-221d397b88ec" (UID: "5d83d658-68c9-4bad-a05b-221d397b88ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.801045 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d508c37-bed9-4481-b086-0159800059e5-kube-api-access-nt6mq" (OuterVolumeSpecName: "kube-api-access-nt6mq") pod "8d508c37-bed9-4481-b086-0159800059e5" (UID: "8d508c37-bed9-4481-b086-0159800059e5"). InnerVolumeSpecName "kube-api-access-nt6mq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.801663 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d83d658-68c9-4bad-a05b-221d397b88ec-kube-api-access-qkmrt" (OuterVolumeSpecName: "kube-api-access-qkmrt") pod "5d83d658-68c9-4bad-a05b-221d397b88ec" (UID: "5d83d658-68c9-4bad-a05b-221d397b88ec"). InnerVolumeSpecName "kube-api-access-qkmrt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.897678 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nt6mq\" (UniqueName: \"kubernetes.io/projected/8d508c37-bed9-4481-b086-0159800059e5-kube-api-access-nt6mq\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.897711 4838 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d508c37-bed9-4481-b086-0159800059e5-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.897737 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qkmrt\" (UniqueName: \"kubernetes.io/projected/5d83d658-68c9-4bad-a05b-221d397b88ec-kube-api-access-qkmrt\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.897747 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5d83d658-68c9-4bad-a05b-221d397b88ec-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:10 crc kubenswrapper[4838]: I1128 10:15:10.897760 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d508c37-bed9-4481-b086-0159800059e5-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:11 crc kubenswrapper[4838]: I1128 10:15:11.116499 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"09948818-c683-4cab-ab8e-c4bfa54809a1","Type":"ContainerStarted","Data":"9347a9e4a57b15f5484a130ff94322768ec00ae9b4798ec87e7847f7edca692d"} Nov 28 10:15:11 crc kubenswrapper[4838]: I1128 10:15:11.130669 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"c2037801-5d52-402d-9d8c-4b17928fb33a","Type":"ContainerStarted","Data":"f1d88e92cb94174edbd2dc931920debacf70615f2618e83386f092ec6d640bf2"} Nov 28 10:15:11 crc kubenswrapper[4838]: I1128 10:15:11.132513 4838 generic.go:334] "Generic (PLEG): container finished" podID="996c85bb-be41-4eaa-9e6e-a912a0a3db0d" containerID="a3fcc5758e976db53e516a97358221b13c2285d66c5486e9da43a9c380a51e8a" exitCode=0 Nov 28 10:15:11 crc kubenswrapper[4838]: I1128 10:15:11.132635 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405415-t4j6r" event={"ID":"996c85bb-be41-4eaa-9e6e-a912a0a3db0d","Type":"ContainerDied","Data":"a3fcc5758e976db53e516a97358221b13c2285d66c5486e9da43a9c380a51e8a"} Nov 28 10:15:11 crc kubenswrapper[4838]: I1128 10:15:11.132687 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405415-t4j6r" event={"ID":"996c85bb-be41-4eaa-9e6e-a912a0a3db0d","Type":"ContainerStarted","Data":"5b55536e2837b69d1665a6e6bff75f489d6b488cf4f38ca9248cbd37320eb4c1"} Nov 28 10:15:11 crc kubenswrapper[4838]: I1128 10:15:11.134363 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-qrzr7" event={"ID":"5d83d658-68c9-4bad-a05b-221d397b88ec","Type":"ContainerDied","Data":"7270f940d77814bf1e44d082d84685ea444deaa118d25d41ade9f6cc1501be91"} Nov 28 10:15:11 crc kubenswrapper[4838]: I1128 10:15:11.134383 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-qrzr7" Nov 28 10:15:11 crc kubenswrapper[4838]: I1128 10:15:11.135525 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-zqrtr" event={"ID":"8d508c37-bed9-4481-b086-0159800059e5","Type":"ContainerDied","Data":"97226bc51f87332c5294fcfab4eaeb433c58351d1364989ee98d13a97018dd96"} Nov 28 10:15:11 crc kubenswrapper[4838]: I1128 10:15:11.135603 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-zqrtr" Nov 28 10:15:11 crc kubenswrapper[4838]: I1128 10:15:11.137050 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"061baebe-5a1a-4090-a396-84571f88b105","Type":"ContainerStarted","Data":"28d65b10f09121656d5bc82111248ec31172aaa37dc76188b3dffeb078310ac9"} Nov 28 10:15:11 crc kubenswrapper[4838]: I1128 10:15:11.138588 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"20476ab2-8070-42b3-a05c-d2c07c111ea9","Type":"ContainerStarted","Data":"67ab561d91f7f18bd4aea30e1130e64447d7b10819ab03531ebf62b911393e63"} Nov 28 10:15:11 crc kubenswrapper[4838]: I1128 10:15:11.139932 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-dqjd8" event={"ID":"50cdff0a-cfe5-41e1-8eed-67b23079335f","Type":"ContainerStarted","Data":"8034a158d67d068c32f103ad85f12aa05d8f6e0b175c81aed67762458dc2daba"} Nov 28 10:15:11 crc kubenswrapper[4838]: I1128 10:15:11.142057 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"229a7c23-7909-4e77-bfa9-92d7d4f0f0eb","Type":"ContainerStarted","Data":"d99bb9ccfbb55633ae99fb8330f117a93958acd3aaf42364ea48d0e8b3450567"} Nov 28 10:15:11 crc kubenswrapper[4838]: I1128 10:15:11.145615 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"55cfa883-d16d-4231-95e7-fd0b3ad9b702","Type":"ContainerStarted","Data":"a26b7e76b2cd58997778d7c102971838ad18db2126e7cae52e2ebafdbf43807a"} Nov 28 10:15:11 crc kubenswrapper[4838]: I1128 10:15:11.228975 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-zqrtr"] Nov 28 10:15:11 crc kubenswrapper[4838]: I1128 10:15:11.236264 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-zqrtr"] Nov 28 10:15:11 crc kubenswrapper[4838]: I1128 10:15:11.247807 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-qrzr7"] Nov 28 10:15:11 crc kubenswrapper[4838]: I1128 10:15:11.253034 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-qrzr7"] Nov 28 10:15:11 crc kubenswrapper[4838]: I1128 10:15:11.624004 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-zt4fz"] Nov 28 10:15:11 crc kubenswrapper[4838]: W1128 10:15:11.701172 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4cb6513a_e07a_40b9_a3ad_f147b8b4a96d.slice/crio-fd6c21e50b5223f0200405829c7827eba5e284b931c345233297c7f9dc94b9db WatchSource:0}: Error finding container fd6c21e50b5223f0200405829c7827eba5e284b931c345233297c7f9dc94b9db: Status 404 returned error can't find the container with id fd6c21e50b5223f0200405829c7827eba5e284b931c345233297c7f9dc94b9db Nov 28 10:15:12 crc kubenswrapper[4838]: I1128 10:15:12.160659 4838 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/ovn-controller-ovs-zt4fz" event={"ID":"4cb6513a-e07a-40b9-a3ad-f147b8b4a96d","Type":"ContainerStarted","Data":"fd6c21e50b5223f0200405829c7827eba5e284b931c345233297c7f9dc94b9db"} Nov 28 10:15:12 crc kubenswrapper[4838]: I1128 10:15:12.573888 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d83d658-68c9-4bad-a05b-221d397b88ec" path="/var/lib/kubelet/pods/5d83d658-68c9-4bad-a05b-221d397b88ec/volumes" Nov 28 10:15:12 crc kubenswrapper[4838]: I1128 10:15:12.574580 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d508c37-bed9-4481-b086-0159800059e5" path="/var/lib/kubelet/pods/8d508c37-bed9-4481-b086-0159800059e5/volumes" Nov 28 10:15:12 crc kubenswrapper[4838]: I1128 10:15:12.656685 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405415-t4j6r" Nov 28 10:15:12 crc kubenswrapper[4838]: I1128 10:15:12.734291 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9mv2g\" (UniqueName: \"kubernetes.io/projected/996c85bb-be41-4eaa-9e6e-a912a0a3db0d-kube-api-access-9mv2g\") pod \"996c85bb-be41-4eaa-9e6e-a912a0a3db0d\" (UID: \"996c85bb-be41-4eaa-9e6e-a912a0a3db0d\") " Nov 28 10:15:12 crc kubenswrapper[4838]: I1128 10:15:12.734469 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/996c85bb-be41-4eaa-9e6e-a912a0a3db0d-config-volume\") pod \"996c85bb-be41-4eaa-9e6e-a912a0a3db0d\" (UID: \"996c85bb-be41-4eaa-9e6e-a912a0a3db0d\") " Nov 28 10:15:12 crc kubenswrapper[4838]: I1128 10:15:12.734522 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/996c85bb-be41-4eaa-9e6e-a912a0a3db0d-secret-volume\") pod \"996c85bb-be41-4eaa-9e6e-a912a0a3db0d\" (UID: \"996c85bb-be41-4eaa-9e6e-a912a0a3db0d\") " Nov 28 10:15:12 crc kubenswrapper[4838]: I1128 10:15:12.735665 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/996c85bb-be41-4eaa-9e6e-a912a0a3db0d-config-volume" (OuterVolumeSpecName: "config-volume") pod "996c85bb-be41-4eaa-9e6e-a912a0a3db0d" (UID: "996c85bb-be41-4eaa-9e6e-a912a0a3db0d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:15:12 crc kubenswrapper[4838]: I1128 10:15:12.740991 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/996c85bb-be41-4eaa-9e6e-a912a0a3db0d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "996c85bb-be41-4eaa-9e6e-a912a0a3db0d" (UID: "996c85bb-be41-4eaa-9e6e-a912a0a3db0d"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:15:12 crc kubenswrapper[4838]: I1128 10:15:12.743554 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/996c85bb-be41-4eaa-9e6e-a912a0a3db0d-kube-api-access-9mv2g" (OuterVolumeSpecName: "kube-api-access-9mv2g") pod "996c85bb-be41-4eaa-9e6e-a912a0a3db0d" (UID: "996c85bb-be41-4eaa-9e6e-a912a0a3db0d"). InnerVolumeSpecName "kube-api-access-9mv2g". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:15:12 crc kubenswrapper[4838]: I1128 10:15:12.836286 4838 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/996c85bb-be41-4eaa-9e6e-a912a0a3db0d-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:12 crc kubenswrapper[4838]: I1128 10:15:12.836321 4838 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/996c85bb-be41-4eaa-9e6e-a912a0a3db0d-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:12 crc kubenswrapper[4838]: I1128 10:15:12.836331 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9mv2g\" (UniqueName: \"kubernetes.io/projected/996c85bb-be41-4eaa-9e6e-a912a0a3db0d-kube-api-access-9mv2g\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:13 crc kubenswrapper[4838]: I1128 10:15:13.173380 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405415-t4j6r" event={"ID":"996c85bb-be41-4eaa-9e6e-a912a0a3db0d","Type":"ContainerDied","Data":"5b55536e2837b69d1665a6e6bff75f489d6b488cf4f38ca9248cbd37320eb4c1"} Nov 28 10:15:13 crc kubenswrapper[4838]: I1128 10:15:13.173422 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5b55536e2837b69d1665a6e6bff75f489d6b488cf4f38ca9248cbd37320eb4c1" Nov 28 10:15:13 crc kubenswrapper[4838]: I1128 10:15:13.173489 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405415-t4j6r" Nov 28 10:15:13 crc kubenswrapper[4838]: E1128 10:15:13.267224 4838 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod996c85bb_be41_4eaa_9e6e_a912a0a3db0d.slice/crio-5b55536e2837b69d1665a6e6bff75f489d6b488cf4f38ca9248cbd37320eb4c1\": RecentStats: unable to find data in memory cache]" Nov 28 10:15:19 crc kubenswrapper[4838]: I1128 10:15:19.216630 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"09948818-c683-4cab-ab8e-c4bfa54809a1","Type":"ContainerStarted","Data":"2cb6417db20c64b3ead00f0e04f22f9ae30fdef34f6e33d44bdda1e4d8f80711"} Nov 28 10:15:19 crc kubenswrapper[4838]: I1128 10:15:19.217256 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 28 10:15:19 crc kubenswrapper[4838]: I1128 10:15:19.218350 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"c2037801-5d52-402d-9d8c-4b17928fb33a","Type":"ContainerStarted","Data":"5013dd52f69b7c4a00ad8d88cd1c6d7223705de1837f4287bbd30fb9125411ed"} Nov 28 10:15:19 crc kubenswrapper[4838]: I1128 10:15:19.220138 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"061baebe-5a1a-4090-a396-84571f88b105","Type":"ContainerStarted","Data":"d61c60b65c0fe883c41638955d179c5a640c5f44f8b5985549a965f3be860a7f"} Nov 28 10:15:19 crc kubenswrapper[4838]: I1128 10:15:19.222063 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"20476ab2-8070-42b3-a05c-d2c07c111ea9","Type":"ContainerStarted","Data":"c42e821df0e242c2fdae84c70d728069856b725586543816da63dbe9412f21a5"} Nov 28 10:15:19 crc kubenswrapper[4838]: I1128 10:15:19.223474 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-controller-dqjd8" event={"ID":"50cdff0a-cfe5-41e1-8eed-67b23079335f","Type":"ContainerStarted","Data":"e644cff210382d44241e4034cae5705ae19c9180a2c8737a91e35e99134fecc1"} Nov 28 10:15:19 crc kubenswrapper[4838]: I1128 10:15:19.223591 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-dqjd8" Nov 28 10:15:19 crc kubenswrapper[4838]: I1128 10:15:19.225177 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"229a7c23-7909-4e77-bfa9-92d7d4f0f0eb","Type":"ContainerStarted","Data":"6e9254aec5f2e5d8018bdc7354b5d1fbaa00f511bf8312253bd9b4c3cc48b94d"} Nov 28 10:15:19 crc kubenswrapper[4838]: I1128 10:15:19.225222 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 28 10:15:19 crc kubenswrapper[4838]: I1128 10:15:19.226577 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"55cfa883-d16d-4231-95e7-fd0b3ad9b702","Type":"ContainerStarted","Data":"51be7e290d8bfccb82b2911526c87cb56c72e3e43599778eac8107af5c580eed"} Nov 28 10:15:19 crc kubenswrapper[4838]: I1128 10:15:19.228266 4838 generic.go:334] "Generic (PLEG): container finished" podID="4cb6513a-e07a-40b9-a3ad-f147b8b4a96d" containerID="7df8aaa65097d90a7fec12ad0320a398c92bed849ac352c7262ac80a35678796" exitCode=0 Nov 28 10:15:19 crc kubenswrapper[4838]: I1128 10:15:19.228294 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-zt4fz" event={"ID":"4cb6513a-e07a-40b9-a3ad-f147b8b4a96d","Type":"ContainerDied","Data":"7df8aaa65097d90a7fec12ad0320a398c92bed849ac352c7262ac80a35678796"} Nov 28 10:15:19 crc kubenswrapper[4838]: I1128 10:15:19.240294 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=21.713967361 podStartE2EDuration="29.240277083s" podCreationTimestamp="2025-11-28 10:14:50 +0000 UTC" firstStartedPulling="2025-11-28 10:15:10.716187497 +0000 UTC m=+1082.415161667" lastFinishedPulling="2025-11-28 10:15:18.242497219 +0000 UTC m=+1089.941471389" observedRunningTime="2025-11-28 10:15:19.23644364 +0000 UTC m=+1090.935417810" watchObservedRunningTime="2025-11-28 10:15:19.240277083 +0000 UTC m=+1090.939251253" Nov 28 10:15:19 crc kubenswrapper[4838]: I1128 10:15:19.340360 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=19.348961811 podStartE2EDuration="27.34034154s" podCreationTimestamp="2025-11-28 10:14:52 +0000 UTC" firstStartedPulling="2025-11-28 10:15:10.240714292 +0000 UTC m=+1081.939688462" lastFinishedPulling="2025-11-28 10:15:18.232094011 +0000 UTC m=+1089.931068191" observedRunningTime="2025-11-28 10:15:19.335366717 +0000 UTC m=+1091.034340897" watchObservedRunningTime="2025-11-28 10:15:19.34034154 +0000 UTC m=+1091.039315710" Nov 28 10:15:19 crc kubenswrapper[4838]: I1128 10:15:19.386861 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-dqjd8" podStartSLOduration=14.436263938 podStartE2EDuration="22.386839305s" podCreationTimestamp="2025-11-28 10:14:57 +0000 UTC" firstStartedPulling="2025-11-28 10:15:10.280914397 +0000 UTC m=+1081.979888567" lastFinishedPulling="2025-11-28 10:15:18.231489754 +0000 UTC m=+1089.930463934" observedRunningTime="2025-11-28 10:15:19.383772833 +0000 UTC m=+1091.082746993" watchObservedRunningTime="2025-11-28 10:15:19.386839305 +0000 UTC m=+1091.085813475" Nov 
28 10:15:20 crc kubenswrapper[4838]: I1128 10:15:20.238040 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-zt4fz" event={"ID":"4cb6513a-e07a-40b9-a3ad-f147b8b4a96d","Type":"ContainerStarted","Data":"5fa8e7fafe5017d1b6d72e9e6cf2685c0ce3b16bf7592481fd691dd5fe100cfb"} Nov 28 10:15:20 crc kubenswrapper[4838]: I1128 10:15:20.238379 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-zt4fz" event={"ID":"4cb6513a-e07a-40b9-a3ad-f147b8b4a96d","Type":"ContainerStarted","Data":"11f3f833e5a79d71b35a28f59fb3d3c4950cd85fb01ce769f9081167cfad3e8f"} Nov 28 10:15:20 crc kubenswrapper[4838]: I1128 10:15:20.238595 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:15:21 crc kubenswrapper[4838]: I1128 10:15:21.247220 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:15:22 crc kubenswrapper[4838]: I1128 10:15:22.256599 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"c2037801-5d52-402d-9d8c-4b17928fb33a","Type":"ContainerStarted","Data":"6688c27b37e9e965a77376eabc33a5bfc8f455a30ef48e0a9266d9100265ed30"} Nov 28 10:15:22 crc kubenswrapper[4838]: I1128 10:15:22.260637 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"20476ab2-8070-42b3-a05c-d2c07c111ea9","Type":"ContainerStarted","Data":"c4453c899b1e5bb72bb9f774ec22b08d3182f972383aeb77bc5488b61a4ff36c"} Nov 28 10:15:22 crc kubenswrapper[4838]: I1128 10:15:22.283564 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-zt4fz" podStartSLOduration=18.751142175 podStartE2EDuration="25.283544058s" podCreationTimestamp="2025-11-28 10:14:57 +0000 UTC" firstStartedPulling="2025-11-28 10:15:11.704340442 +0000 UTC m=+1083.403314622" lastFinishedPulling="2025-11-28 10:15:18.236742315 +0000 UTC m=+1089.935716505" observedRunningTime="2025-11-28 10:15:20.262452278 +0000 UTC m=+1091.961426458" watchObservedRunningTime="2025-11-28 10:15:22.283544058 +0000 UTC m=+1093.982518248" Nov 28 10:15:22 crc kubenswrapper[4838]: I1128 10:15:22.291513 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=13.332032759 podStartE2EDuration="24.291492211s" podCreationTimestamp="2025-11-28 10:14:58 +0000 UTC" firstStartedPulling="2025-11-28 10:15:10.791802511 +0000 UTC m=+1082.490776671" lastFinishedPulling="2025-11-28 10:15:21.751261943 +0000 UTC m=+1093.450236123" observedRunningTime="2025-11-28 10:15:22.285055808 +0000 UTC m=+1093.984029978" watchObservedRunningTime="2025-11-28 10:15:22.291492211 +0000 UTC m=+1093.990466391" Nov 28 10:15:22 crc kubenswrapper[4838]: I1128 10:15:22.319640 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=15.986677295 podStartE2EDuration="27.319622234s" podCreationTimestamp="2025-11-28 10:14:55 +0000 UTC" firstStartedPulling="2025-11-28 10:15:10.45179546 +0000 UTC m=+1082.150769630" lastFinishedPulling="2025-11-28 10:15:21.784740399 +0000 UTC m=+1093.483714569" observedRunningTime="2025-11-28 10:15:22.312926104 +0000 UTC m=+1094.011900274" watchObservedRunningTime="2025-11-28 10:15:22.319622234 +0000 UTC m=+1094.018596394" Nov 28 10:15:23 crc kubenswrapper[4838]: I1128 10:15:23.103749 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack/kube-state-metrics-0" Nov 28 10:15:23 crc kubenswrapper[4838]: I1128 10:15:23.270031 4838 generic.go:334] "Generic (PLEG): container finished" podID="061baebe-5a1a-4090-a396-84571f88b105" containerID="d61c60b65c0fe883c41638955d179c5a640c5f44f8b5985549a965f3be860a7f" exitCode=0 Nov 28 10:15:23 crc kubenswrapper[4838]: I1128 10:15:23.270097 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"061baebe-5a1a-4090-a396-84571f88b105","Type":"ContainerDied","Data":"d61c60b65c0fe883c41638955d179c5a640c5f44f8b5985549a965f3be860a7f"} Nov 28 10:15:23 crc kubenswrapper[4838]: I1128 10:15:23.272149 4838 generic.go:334] "Generic (PLEG): container finished" podID="55cfa883-d16d-4231-95e7-fd0b3ad9b702" containerID="51be7e290d8bfccb82b2911526c87cb56c72e3e43599778eac8107af5c580eed" exitCode=0 Nov 28 10:15:23 crc kubenswrapper[4838]: I1128 10:15:23.272223 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"55cfa883-d16d-4231-95e7-fd0b3ad9b702","Type":"ContainerDied","Data":"51be7e290d8bfccb82b2911526c87cb56c72e3e43599778eac8107af5c580eed"} Nov 28 10:15:23 crc kubenswrapper[4838]: I1128 10:15:23.770457 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 28 10:15:23 crc kubenswrapper[4838]: I1128 10:15:23.813790 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 28 10:15:23 crc kubenswrapper[4838]: I1128 10:15:23.939899 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:15:23 crc kubenswrapper[4838]: I1128 10:15:23.939965 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:15:23 crc kubenswrapper[4838]: I1128 10:15:23.940015 4838 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 10:15:23 crc kubenswrapper[4838]: I1128 10:15:23.940826 4838 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3ce7b45b9fd71f6cdec20d6a8542bb19cf78bbe5928e243b6058e07f9eb4cc79"} pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 10:15:23 crc kubenswrapper[4838]: I1128 10:15:23.940902 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" containerID="cri-o://3ce7b45b9fd71f6cdec20d6a8542bb19cf78bbe5928e243b6058e07f9eb4cc79" gracePeriod=600 Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.288290 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" 
event={"ID":"55cfa883-d16d-4231-95e7-fd0b3ad9b702","Type":"ContainerStarted","Data":"c6134940b91d1375d7fe8e8704a29197b43e720c9508a617c748fed48a683824"} Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.294660 4838 generic.go:334] "Generic (PLEG): container finished" podID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerID="3ce7b45b9fd71f6cdec20d6a8542bb19cf78bbe5928e243b6058e07f9eb4cc79" exitCode=0 Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.294773 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerDied","Data":"3ce7b45b9fd71f6cdec20d6a8542bb19cf78bbe5928e243b6058e07f9eb4cc79"} Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.294822 4838 scope.go:117] "RemoveContainer" containerID="e44c3d5f2db51d0905366ef1f77dd84ac3c3e496e157394cd047f58af85a3fca" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.297270 4838 generic.go:334] "Generic (PLEG): container finished" podID="f31fe79b-92dd-4237-b387-9d2c825fdacb" containerID="64eab544bbc687e56fe3aac30e4489d2a08e6d683dd0a7256f3123351edd8273" exitCode=0 Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.297337 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" event={"ID":"f31fe79b-92dd-4237-b387-9d2c825fdacb","Type":"ContainerDied","Data":"64eab544bbc687e56fe3aac30e4489d2a08e6d683dd0a7256f3123351edd8273"} Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.305065 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" event={"ID":"cb0df889-52d3-4150-87df-82886662e952","Type":"ContainerStarted","Data":"aecdd9f722e278235b8102bcff27efbbb2ce5e4811df14678aca77ced5f8c97f"} Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.314344 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=28.322292921 podStartE2EDuration="36.314324477s" podCreationTimestamp="2025-11-28 10:14:48 +0000 UTC" firstStartedPulling="2025-11-28 10:15:10.240409044 +0000 UTC m=+1081.939383214" lastFinishedPulling="2025-11-28 10:15:18.2324406 +0000 UTC m=+1089.931414770" observedRunningTime="2025-11-28 10:15:24.314283906 +0000 UTC m=+1096.013258076" watchObservedRunningTime="2025-11-28 10:15:24.314324477 +0000 UTC m=+1096.013298657" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.315015 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.317229 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"061baebe-5a1a-4090-a396-84571f88b105","Type":"ContainerStarted","Data":"7814ea663b79cfd7822d953ea39d520e6fab8ba702b91cbce25a16e60e0e08a5"} Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.317271 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.370595 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.390942 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.397731 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/openstack-cell1-galera-0" podStartSLOduration=27.80427778 podStartE2EDuration="35.397696828s" podCreationTimestamp="2025-11-28 10:14:49 +0000 UTC" firstStartedPulling="2025-11-28 10:15:10.651217468 +0000 UTC m=+1082.350191638" lastFinishedPulling="2025-11-28 10:15:18.244636526 +0000 UTC m=+1089.943610686" observedRunningTime="2025-11-28 10:15:24.386826067 +0000 UTC m=+1096.085800267" watchObservedRunningTime="2025-11-28 10:15:24.397696828 +0000 UTC m=+1096.096671018" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.684518 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-pkbwk"] Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.714460 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-2l8n2"] Nov 28 10:15:24 crc kubenswrapper[4838]: E1128 10:15:24.714786 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="996c85bb-be41-4eaa-9e6e-a912a0a3db0d" containerName="collect-profiles" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.714802 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="996c85bb-be41-4eaa-9e6e-a912a0a3db0d" containerName="collect-profiles" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.714972 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="996c85bb-be41-4eaa-9e6e-a912a0a3db0d" containerName="collect-profiles" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.715754 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.718652 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.733744 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-2l8n2"] Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.791907 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-b7fh7"] Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.792873 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-b7fh7" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.796259 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.805547 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-b7fh7"] Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.860499 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nls7h\" (UniqueName: \"kubernetes.io/projected/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-kube-api-access-nls7h\") pod \"dnsmasq-dns-7fd796d7df-2l8n2\" (UID: \"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79\") " pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.860582 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-config\") pod \"dnsmasq-dns-7fd796d7df-2l8n2\" (UID: \"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79\") " pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.860644 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/77ac18d8-c660-4742-8367-281a06a82e37-config\") pod \"ovn-controller-metrics-b7fh7\" (UID: \"77ac18d8-c660-4742-8367-281a06a82e37\") " pod="openstack/ovn-controller-metrics-b7fh7" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.860775 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-2l8n2\" (UID: \"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79\") " pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.860811 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/77ac18d8-c660-4742-8367-281a06a82e37-ovs-rundir\") pod \"ovn-controller-metrics-b7fh7\" (UID: \"77ac18d8-c660-4742-8367-281a06a82e37\") " pod="openstack/ovn-controller-metrics-b7fh7" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.860978 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/77ac18d8-c660-4742-8367-281a06a82e37-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-b7fh7\" (UID: \"77ac18d8-c660-4742-8367-281a06a82e37\") " pod="openstack/ovn-controller-metrics-b7fh7" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.861055 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/77ac18d8-c660-4742-8367-281a06a82e37-ovn-rundir\") pod \"ovn-controller-metrics-b7fh7\" (UID: \"77ac18d8-c660-4742-8367-281a06a82e37\") " pod="openstack/ovn-controller-metrics-b7fh7" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.861073 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-2l8n2\" (UID: 
\"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79\") " pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.861094 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77ac18d8-c660-4742-8367-281a06a82e37-combined-ca-bundle\") pod \"ovn-controller-metrics-b7fh7\" (UID: \"77ac18d8-c660-4742-8367-281a06a82e37\") " pod="openstack/ovn-controller-metrics-b7fh7" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.861181 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tl7w5\" (UniqueName: \"kubernetes.io/projected/77ac18d8-c660-4742-8367-281a06a82e37-kube-api-access-tl7w5\") pod \"ovn-controller-metrics-b7fh7\" (UID: \"77ac18d8-c660-4742-8367-281a06a82e37\") " pod="openstack/ovn-controller-metrics-b7fh7" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.940553 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-sr898"] Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.962472 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nls7h\" (UniqueName: \"kubernetes.io/projected/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-kube-api-access-nls7h\") pod \"dnsmasq-dns-7fd796d7df-2l8n2\" (UID: \"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79\") " pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.962508 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-config\") pod \"dnsmasq-dns-7fd796d7df-2l8n2\" (UID: \"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79\") " pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.962530 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/77ac18d8-c660-4742-8367-281a06a82e37-config\") pod \"ovn-controller-metrics-b7fh7\" (UID: \"77ac18d8-c660-4742-8367-281a06a82e37\") " pod="openstack/ovn-controller-metrics-b7fh7" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.962551 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-2l8n2\" (UID: \"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79\") " pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.962568 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/77ac18d8-c660-4742-8367-281a06a82e37-ovs-rundir\") pod \"ovn-controller-metrics-b7fh7\" (UID: \"77ac18d8-c660-4742-8367-281a06a82e37\") " pod="openstack/ovn-controller-metrics-b7fh7" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.962627 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/77ac18d8-c660-4742-8367-281a06a82e37-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-b7fh7\" (UID: \"77ac18d8-c660-4742-8367-281a06a82e37\") " pod="openstack/ovn-controller-metrics-b7fh7" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.962702 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/77ac18d8-c660-4742-8367-281a06a82e37-ovn-rundir\") pod \"ovn-controller-metrics-b7fh7\" (UID: \"77ac18d8-c660-4742-8367-281a06a82e37\") " pod="openstack/ovn-controller-metrics-b7fh7" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.962736 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-2l8n2\" (UID: \"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79\") " pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.962753 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77ac18d8-c660-4742-8367-281a06a82e37-combined-ca-bundle\") pod \"ovn-controller-metrics-b7fh7\" (UID: \"77ac18d8-c660-4742-8367-281a06a82e37\") " pod="openstack/ovn-controller-metrics-b7fh7" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.962777 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tl7w5\" (UniqueName: \"kubernetes.io/projected/77ac18d8-c660-4742-8367-281a06a82e37-kube-api-access-tl7w5\") pod \"ovn-controller-metrics-b7fh7\" (UID: \"77ac18d8-c660-4742-8367-281a06a82e37\") " pod="openstack/ovn-controller-metrics-b7fh7" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.962935 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/77ac18d8-c660-4742-8367-281a06a82e37-ovs-rundir\") pod \"ovn-controller-metrics-b7fh7\" (UID: \"77ac18d8-c660-4742-8367-281a06a82e37\") " pod="openstack/ovn-controller-metrics-b7fh7" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.963008 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/77ac18d8-c660-4742-8367-281a06a82e37-ovn-rundir\") pod \"ovn-controller-metrics-b7fh7\" (UID: \"77ac18d8-c660-4742-8367-281a06a82e37\") " pod="openstack/ovn-controller-metrics-b7fh7" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.963439 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-config\") pod \"dnsmasq-dns-7fd796d7df-2l8n2\" (UID: \"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79\") " pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.963473 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/77ac18d8-c660-4742-8367-281a06a82e37-config\") pod \"ovn-controller-metrics-b7fh7\" (UID: \"77ac18d8-c660-4742-8367-281a06a82e37\") " pod="openstack/ovn-controller-metrics-b7fh7" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.964201 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-2l8n2\" (UID: \"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79\") " pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.967417 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-2l8n2\" 
(UID: \"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79\") " pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.977661 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-r85mc"] Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.978891 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.983338 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/77ac18d8-c660-4742-8367-281a06a82e37-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-b7fh7\" (UID: \"77ac18d8-c660-4742-8367-281a06a82e37\") " pod="openstack/ovn-controller-metrics-b7fh7" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.983389 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77ac18d8-c660-4742-8367-281a06a82e37-combined-ca-bundle\") pod \"ovn-controller-metrics-b7fh7\" (UID: \"77ac18d8-c660-4742-8367-281a06a82e37\") " pod="openstack/ovn-controller-metrics-b7fh7" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.983727 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.988616 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-r85mc"] Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.995347 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tl7w5\" (UniqueName: \"kubernetes.io/projected/77ac18d8-c660-4742-8367-281a06a82e37-kube-api-access-tl7w5\") pod \"ovn-controller-metrics-b7fh7\" (UID: \"77ac18d8-c660-4742-8367-281a06a82e37\") " pod="openstack/ovn-controller-metrics-b7fh7" Nov 28 10:15:24 crc kubenswrapper[4838]: I1128 10:15:24.997894 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-b7fh7" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.033178 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nls7h\" (UniqueName: \"kubernetes.io/projected/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-kube-api-access-nls7h\") pod \"dnsmasq-dns-7fd796d7df-2l8n2\" (UID: \"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79\") " pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.063636 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pxb4\" (UniqueName: \"kubernetes.io/projected/db94f019-727d-48df-a297-7007e4133cf6-kube-api-access-9pxb4\") pod \"dnsmasq-dns-86db49b7ff-r85mc\" (UID: \"db94f019-727d-48df-a297-7007e4133cf6\") " pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.063695 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-r85mc\" (UID: \"db94f019-727d-48df-a297-7007e4133cf6\") " pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.063785 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-config\") pod \"dnsmasq-dns-86db49b7ff-r85mc\" (UID: \"db94f019-727d-48df-a297-7007e4133cf6\") " pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.063915 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-r85mc\" (UID: \"db94f019-727d-48df-a297-7007e4133cf6\") " pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.064457 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-r85mc\" (UID: \"db94f019-727d-48df-a297-7007e4133cf6\") " pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.166567 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-config\") pod \"dnsmasq-dns-86db49b7ff-r85mc\" (UID: \"db94f019-727d-48df-a297-7007e4133cf6\") " pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.166936 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-r85mc\" (UID: \"db94f019-727d-48df-a297-7007e4133cf6\") " pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.166996 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-r85mc\" (UID: 
\"db94f019-727d-48df-a297-7007e4133cf6\") " pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.167040 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pxb4\" (UniqueName: \"kubernetes.io/projected/db94f019-727d-48df-a297-7007e4133cf6-kube-api-access-9pxb4\") pod \"dnsmasq-dns-86db49b7ff-r85mc\" (UID: \"db94f019-727d-48df-a297-7007e4133cf6\") " pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.167080 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-r85mc\" (UID: \"db94f019-727d-48df-a297-7007e4133cf6\") " pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.167835 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-config\") pod \"dnsmasq-dns-86db49b7ff-r85mc\" (UID: \"db94f019-727d-48df-a297-7007e4133cf6\") " pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.168032 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-r85mc\" (UID: \"db94f019-727d-48df-a297-7007e4133cf6\") " pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.168388 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-r85mc\" (UID: \"db94f019-727d-48df-a297-7007e4133cf6\") " pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.174527 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-r85mc\" (UID: \"db94f019-727d-48df-a297-7007e4133cf6\") " pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.192111 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pxb4\" (UniqueName: \"kubernetes.io/projected/db94f019-727d-48df-a297-7007e4133cf6-kube-api-access-9pxb4\") pod \"dnsmasq-dns-86db49b7ff-r85mc\" (UID: \"db94f019-727d-48df-a297-7007e4133cf6\") " pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.262151 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.311328 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.314861 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.330044 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" event={"ID":"f31fe79b-92dd-4237-b387-9d2c825fdacb","Type":"ContainerStarted","Data":"c5c166efdaa1b902ce9dbaa399a3a273b636162224957f462566508d9c3366fd"} Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.330219 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" podUID="f31fe79b-92dd-4237-b387-9d2c825fdacb" containerName="dnsmasq-dns" containerID="cri-o://c5c166efdaa1b902ce9dbaa399a3a273b636162224957f462566508d9c3366fd" gracePeriod=10 Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.330501 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.341213 4838 generic.go:334] "Generic (PLEG): container finished" podID="cb0df889-52d3-4150-87df-82886662e952" containerID="aecdd9f722e278235b8102bcff27efbbb2ce5e4811df14678aca77ced5f8c97f" exitCode=0 Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.341289 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" event={"ID":"cb0df889-52d3-4150-87df-82886662e952","Type":"ContainerDied","Data":"aecdd9f722e278235b8102bcff27efbbb2ce5e4811df14678aca77ced5f8c97f"} Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.341321 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" event={"ID":"cb0df889-52d3-4150-87df-82886662e952","Type":"ContainerStarted","Data":"59812f6497c00d6ebaee9aebdd392920b4d66095abda1f0d4407a00aec406035"} Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.341488 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" podUID="cb0df889-52d3-4150-87df-82886662e952" containerName="dnsmasq-dns" containerID="cri-o://59812f6497c00d6ebaee9aebdd392920b4d66095abda1f0d4407a00aec406035" gracePeriod=10 Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.341577 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.357138 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerStarted","Data":"1815c6b644c08c4a75da2a50900db223999a631363ba83a16dba3176b263bb61"} Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.360500 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"366c721a-0e79-44a0-aa02-761c4ddc6936","Type":"ContainerStarted","Data":"c72266b9b0f0228a2212b582e2d53a73166e50af00c9674d71d6a8b1fbb59a1d"} Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.367097 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" podStartSLOduration=2.423394017 podStartE2EDuration="39.367073251s" podCreationTimestamp="2025-11-28 10:14:46 +0000 UTC" firstStartedPulling="2025-11-28 10:14:47.063379219 +0000 UTC m=+1058.762353389" 
lastFinishedPulling="2025-11-28 10:15:24.007058453 +0000 UTC m=+1095.706032623" observedRunningTime="2025-11-28 10:15:25.362753065 +0000 UTC m=+1097.061727255" watchObservedRunningTime="2025-11-28 10:15:25.367073251 +0000 UTC m=+1097.066047431" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.387800 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.436068 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" podStartSLOduration=2.746098844 podStartE2EDuration="39.436049306s" podCreationTimestamp="2025-11-28 10:14:46 +0000 UTC" firstStartedPulling="2025-11-28 10:14:47.301058181 +0000 UTC m=+1059.000032351" lastFinishedPulling="2025-11-28 10:15:23.991008623 +0000 UTC m=+1095.689982813" observedRunningTime="2025-11-28 10:15:25.433930389 +0000 UTC m=+1097.132904559" watchObservedRunningTime="2025-11-28 10:15:25.436049306 +0000 UTC m=+1097.135023476" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.505138 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-b7fh7"] Nov 28 10:15:25 crc kubenswrapper[4838]: W1128 10:15:25.560442 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod77ac18d8_c660_4742_8367_281a06a82e37.slice/crio-83dba9519840edbea9d624a10d0475a40a544c9e65499cd5fdb13ceca5ebaa73 WatchSource:0}: Error finding container 83dba9519840edbea9d624a10d0475a40a544c9e65499cd5fdb13ceca5ebaa73: Status 404 returned error can't find the container with id 83dba9519840edbea9d624a10d0475a40a544c9e65499cd5fdb13ceca5ebaa73 Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.603506 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.605177 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.608412 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-z26dh" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.608665 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.608856 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.609017 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.614885 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.677477 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4v9n2\" (UniqueName: \"kubernetes.io/projected/d62996a9-1816-49c7-9280-f115770a83ad-kube-api-access-4v9n2\") pod \"ovn-northd-0\" (UID: \"d62996a9-1816-49c7-9280-f115770a83ad\") " pod="openstack/ovn-northd-0" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.677785 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d62996a9-1816-49c7-9280-f115770a83ad-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"d62996a9-1816-49c7-9280-f115770a83ad\") " pod="openstack/ovn-northd-0" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.677829 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d62996a9-1816-49c7-9280-f115770a83ad-config\") pod \"ovn-northd-0\" (UID: \"d62996a9-1816-49c7-9280-f115770a83ad\") " pod="openstack/ovn-northd-0" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.677862 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d62996a9-1816-49c7-9280-f115770a83ad-scripts\") pod \"ovn-northd-0\" (UID: \"d62996a9-1816-49c7-9280-f115770a83ad\") " pod="openstack/ovn-northd-0" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.678102 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/d62996a9-1816-49c7-9280-f115770a83ad-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"d62996a9-1816-49c7-9280-f115770a83ad\") " pod="openstack/ovn-northd-0" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.678140 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d62996a9-1816-49c7-9280-f115770a83ad-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"d62996a9-1816-49c7-9280-f115770a83ad\") " pod="openstack/ovn-northd-0" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.678158 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d62996a9-1816-49c7-9280-f115770a83ad-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"d62996a9-1816-49c7-9280-f115770a83ad\") " pod="openstack/ovn-northd-0" Nov 28 10:15:25 crc kubenswrapper[4838]: 
I1128 10:15:25.736380 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-2l8n2"] Nov 28 10:15:25 crc kubenswrapper[4838]: W1128 10:15:25.744813 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3dfeb40f_6bbe_4fea_96e9_83f3b4996c79.slice/crio-0cfab2365ad078099727b216b5e80ed907a838d6cc889e431e48dc112b933d9c WatchSource:0}: Error finding container 0cfab2365ad078099727b216b5e80ed907a838d6cc889e431e48dc112b933d9c: Status 404 returned error can't find the container with id 0cfab2365ad078099727b216b5e80ed907a838d6cc889e431e48dc112b933d9c Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.779357 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d62996a9-1816-49c7-9280-f115770a83ad-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"d62996a9-1816-49c7-9280-f115770a83ad\") " pod="openstack/ovn-northd-0" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.779419 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d62996a9-1816-49c7-9280-f115770a83ad-config\") pod \"ovn-northd-0\" (UID: \"d62996a9-1816-49c7-9280-f115770a83ad\") " pod="openstack/ovn-northd-0" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.779453 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d62996a9-1816-49c7-9280-f115770a83ad-scripts\") pod \"ovn-northd-0\" (UID: \"d62996a9-1816-49c7-9280-f115770a83ad\") " pod="openstack/ovn-northd-0" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.779508 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/d62996a9-1816-49c7-9280-f115770a83ad-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"d62996a9-1816-49c7-9280-f115770a83ad\") " pod="openstack/ovn-northd-0" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.779547 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d62996a9-1816-49c7-9280-f115770a83ad-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"d62996a9-1816-49c7-9280-f115770a83ad\") " pod="openstack/ovn-northd-0" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.779562 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d62996a9-1816-49c7-9280-f115770a83ad-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"d62996a9-1816-49c7-9280-f115770a83ad\") " pod="openstack/ovn-northd-0" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.779588 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4v9n2\" (UniqueName: \"kubernetes.io/projected/d62996a9-1816-49c7-9280-f115770a83ad-kube-api-access-4v9n2\") pod \"ovn-northd-0\" (UID: \"d62996a9-1816-49c7-9280-f115770a83ad\") " pod="openstack/ovn-northd-0" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.782642 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d62996a9-1816-49c7-9280-f115770a83ad-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"d62996a9-1816-49c7-9280-f115770a83ad\") " pod="openstack/ovn-northd-0" Nov 28 10:15:25 crc kubenswrapper[4838]: 
I1128 10:15:25.782889 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d62996a9-1816-49c7-9280-f115770a83ad-config\") pod \"ovn-northd-0\" (UID: \"d62996a9-1816-49c7-9280-f115770a83ad\") " pod="openstack/ovn-northd-0" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.782925 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d62996a9-1816-49c7-9280-f115770a83ad-scripts\") pod \"ovn-northd-0\" (UID: \"d62996a9-1816-49c7-9280-f115770a83ad\") " pod="openstack/ovn-northd-0" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.785895 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d62996a9-1816-49c7-9280-f115770a83ad-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"d62996a9-1816-49c7-9280-f115770a83ad\") " pod="openstack/ovn-northd-0" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.785952 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d62996a9-1816-49c7-9280-f115770a83ad-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"d62996a9-1816-49c7-9280-f115770a83ad\") " pod="openstack/ovn-northd-0" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.786798 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/d62996a9-1816-49c7-9280-f115770a83ad-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"d62996a9-1816-49c7-9280-f115770a83ad\") " pod="openstack/ovn-northd-0" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.807234 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4v9n2\" (UniqueName: \"kubernetes.io/projected/d62996a9-1816-49c7-9280-f115770a83ad-kube-api-access-4v9n2\") pod \"ovn-northd-0\" (UID: \"d62996a9-1816-49c7-9280-f115770a83ad\") " pod="openstack/ovn-northd-0" Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.922176 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-r85mc"] Nov 28 10:15:25 crc kubenswrapper[4838]: I1128 10:15:25.942105 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.029907 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 28 10:15:26 crc kubenswrapper[4838]: W1128 10:15:26.034743 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddb94f019_727d_48df_a297_7007e4133cf6.slice/crio-4412dd745cc5c5431ceb7a65635d5c2f5f237cff3e6cf654298847dd72b2f6b8 WatchSource:0}: Error finding container 4412dd745cc5c5431ceb7a65635d5c2f5f237cff3e6cf654298847dd72b2f6b8: Status 404 returned error can't find the container with id 4412dd745cc5c5431ceb7a65635d5c2f5f237cff3e6cf654298847dd72b2f6b8 Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.060965 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.064198 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.083943 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f31fe79b-92dd-4237-b387-9d2c825fdacb-config\") pod \"f31fe79b-92dd-4237-b387-9d2c825fdacb\" (UID: \"f31fe79b-92dd-4237-b387-9d2c825fdacb\") " Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.084021 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q7ptw\" (UniqueName: \"kubernetes.io/projected/cb0df889-52d3-4150-87df-82886662e952-kube-api-access-q7ptw\") pod \"cb0df889-52d3-4150-87df-82886662e952\" (UID: \"cb0df889-52d3-4150-87df-82886662e952\") " Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.084091 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb0df889-52d3-4150-87df-82886662e952-config\") pod \"cb0df889-52d3-4150-87df-82886662e952\" (UID: \"cb0df889-52d3-4150-87df-82886662e952\") " Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.084147 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f31fe79b-92dd-4237-b387-9d2c825fdacb-dns-svc\") pod \"f31fe79b-92dd-4237-b387-9d2c825fdacb\" (UID: \"f31fe79b-92dd-4237-b387-9d2c825fdacb\") " Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.084198 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cb8n5\" (UniqueName: \"kubernetes.io/projected/f31fe79b-92dd-4237-b387-9d2c825fdacb-kube-api-access-cb8n5\") pod \"f31fe79b-92dd-4237-b387-9d2c825fdacb\" (UID: \"f31fe79b-92dd-4237-b387-9d2c825fdacb\") " Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.084234 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb0df889-52d3-4150-87df-82886662e952-dns-svc\") pod \"cb0df889-52d3-4150-87df-82886662e952\" (UID: \"cb0df889-52d3-4150-87df-82886662e952\") " Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.089652 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb0df889-52d3-4150-87df-82886662e952-kube-api-access-q7ptw" (OuterVolumeSpecName: "kube-api-access-q7ptw") pod "cb0df889-52d3-4150-87df-82886662e952" (UID: "cb0df889-52d3-4150-87df-82886662e952"). InnerVolumeSpecName "kube-api-access-q7ptw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.149128 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f31fe79b-92dd-4237-b387-9d2c825fdacb-kube-api-access-cb8n5" (OuterVolumeSpecName: "kube-api-access-cb8n5") pod "f31fe79b-92dd-4237-b387-9d2c825fdacb" (UID: "f31fe79b-92dd-4237-b387-9d2c825fdacb"). InnerVolumeSpecName "kube-api-access-cb8n5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.186210 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q7ptw\" (UniqueName: \"kubernetes.io/projected/cb0df889-52d3-4150-87df-82886662e952-kube-api-access-q7ptw\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.186237 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cb8n5\" (UniqueName: \"kubernetes.io/projected/f31fe79b-92dd-4237-b387-9d2c825fdacb-kube-api-access-cb8n5\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.193943 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb0df889-52d3-4150-87df-82886662e952-config" (OuterVolumeSpecName: "config") pod "cb0df889-52d3-4150-87df-82886662e952" (UID: "cb0df889-52d3-4150-87df-82886662e952"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.212342 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f31fe79b-92dd-4237-b387-9d2c825fdacb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f31fe79b-92dd-4237-b387-9d2c825fdacb" (UID: "f31fe79b-92dd-4237-b387-9d2c825fdacb"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.225218 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f31fe79b-92dd-4237-b387-9d2c825fdacb-config" (OuterVolumeSpecName: "config") pod "f31fe79b-92dd-4237-b387-9d2c825fdacb" (UID: "f31fe79b-92dd-4237-b387-9d2c825fdacb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.235836 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb0df889-52d3-4150-87df-82886662e952-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cb0df889-52d3-4150-87df-82886662e952" (UID: "cb0df889-52d3-4150-87df-82886662e952"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.290225 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f31fe79b-92dd-4237-b387-9d2c825fdacb-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.290470 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb0df889-52d3-4150-87df-82886662e952-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.290480 4838 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f31fe79b-92dd-4237-b387-9d2c825fdacb-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.290490 4838 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb0df889-52d3-4150-87df-82886662e952-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.376353 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" event={"ID":"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79","Type":"ContainerStarted","Data":"0cfab2365ad078099727b216b5e80ed907a838d6cc889e431e48dc112b933d9c"} Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.378968 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-b7fh7" event={"ID":"77ac18d8-c660-4742-8367-281a06a82e37","Type":"ContainerStarted","Data":"83dba9519840edbea9d624a10d0475a40a544c9e65499cd5fdb13ceca5ebaa73"} Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.388319 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" event={"ID":"db94f019-727d-48df-a297-7007e4133cf6","Type":"ContainerStarted","Data":"4412dd745cc5c5431ceb7a65635d5c2f5f237cff3e6cf654298847dd72b2f6b8"} Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.404935 4838 generic.go:334] "Generic (PLEG): container finished" podID="f31fe79b-92dd-4237-b387-9d2c825fdacb" containerID="c5c166efdaa1b902ce9dbaa399a3a273b636162224957f462566508d9c3366fd" exitCode=0 Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.405013 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" event={"ID":"f31fe79b-92dd-4237-b387-9d2c825fdacb","Type":"ContainerDied","Data":"c5c166efdaa1b902ce9dbaa399a3a273b636162224957f462566508d9c3366fd"} Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.405039 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" event={"ID":"f31fe79b-92dd-4237-b387-9d2c825fdacb","Type":"ContainerDied","Data":"5fafbe3dbf0e4f1b139ce9f8f8ca0fd799088853671118eefe5057ebc272f41c"} Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.405054 4838 scope.go:117] "RemoveContainer" containerID="c5c166efdaa1b902ce9dbaa399a3a273b636162224957f462566508d9c3366fd" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.405174 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-sr898" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.421000 4838 generic.go:334] "Generic (PLEG): container finished" podID="cb0df889-52d3-4150-87df-82886662e952" containerID="59812f6497c00d6ebaee9aebdd392920b4d66095abda1f0d4407a00aec406035" exitCode=0 Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.421058 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.421112 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" event={"ID":"cb0df889-52d3-4150-87df-82886662e952","Type":"ContainerDied","Data":"59812f6497c00d6ebaee9aebdd392920b4d66095abda1f0d4407a00aec406035"} Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.421144 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-pkbwk" event={"ID":"cb0df889-52d3-4150-87df-82886662e952","Type":"ContainerDied","Data":"563eecb49fe4d744f840d52eb3ef9e64570800eb3a3a29ffddcc582d553bdbdf"} Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.432859 4838 scope.go:117] "RemoveContainer" containerID="64eab544bbc687e56fe3aac30e4489d2a08e6d683dd0a7256f3123351edd8273" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.439445 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 10:15:26 crc kubenswrapper[4838]: W1128 10:15:26.447190 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd62996a9_1816_49c7_9280_f115770a83ad.slice/crio-a37600cf2161a43f11fe6d33a6be1cbf6519cc9697601fb9edcda333034c0eca WatchSource:0}: Error finding container a37600cf2161a43f11fe6d33a6be1cbf6519cc9697601fb9edcda333034c0eca: Status 404 returned error can't find the container with id a37600cf2161a43f11fe6d33a6be1cbf6519cc9697601fb9edcda333034c0eca Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.451653 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-sr898"] Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.451834 4838 scope.go:117] "RemoveContainer" containerID="c5c166efdaa1b902ce9dbaa399a3a273b636162224957f462566508d9c3366fd" Nov 28 10:15:26 crc kubenswrapper[4838]: E1128 10:15:26.452363 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5c166efdaa1b902ce9dbaa399a3a273b636162224957f462566508d9c3366fd\": container with ID starting with c5c166efdaa1b902ce9dbaa399a3a273b636162224957f462566508d9c3366fd not found: ID does not exist" containerID="c5c166efdaa1b902ce9dbaa399a3a273b636162224957f462566508d9c3366fd" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.452401 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5c166efdaa1b902ce9dbaa399a3a273b636162224957f462566508d9c3366fd"} err="failed to get container status \"c5c166efdaa1b902ce9dbaa399a3a273b636162224957f462566508d9c3366fd\": rpc error: code = NotFound desc = could not find container \"c5c166efdaa1b902ce9dbaa399a3a273b636162224957f462566508d9c3366fd\": container with ID starting with c5c166efdaa1b902ce9dbaa399a3a273b636162224957f462566508d9c3366fd not found: ID does not exist" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.452424 4838 scope.go:117] "RemoveContainer" 
containerID="64eab544bbc687e56fe3aac30e4489d2a08e6d683dd0a7256f3123351edd8273" Nov 28 10:15:26 crc kubenswrapper[4838]: E1128 10:15:26.453164 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64eab544bbc687e56fe3aac30e4489d2a08e6d683dd0a7256f3123351edd8273\": container with ID starting with 64eab544bbc687e56fe3aac30e4489d2a08e6d683dd0a7256f3123351edd8273 not found: ID does not exist" containerID="64eab544bbc687e56fe3aac30e4489d2a08e6d683dd0a7256f3123351edd8273" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.453215 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64eab544bbc687e56fe3aac30e4489d2a08e6d683dd0a7256f3123351edd8273"} err="failed to get container status \"64eab544bbc687e56fe3aac30e4489d2a08e6d683dd0a7256f3123351edd8273\": rpc error: code = NotFound desc = could not find container \"64eab544bbc687e56fe3aac30e4489d2a08e6d683dd0a7256f3123351edd8273\": container with ID starting with 64eab544bbc687e56fe3aac30e4489d2a08e6d683dd0a7256f3123351edd8273 not found: ID does not exist" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.453240 4838 scope.go:117] "RemoveContainer" containerID="59812f6497c00d6ebaee9aebdd392920b4d66095abda1f0d4407a00aec406035" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.460225 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-sr898"] Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.509387 4838 scope.go:117] "RemoveContainer" containerID="aecdd9f722e278235b8102bcff27efbbb2ce5e4811df14678aca77ced5f8c97f" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.517758 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-pkbwk"] Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.519684 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-pkbwk"] Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.530498 4838 scope.go:117] "RemoveContainer" containerID="59812f6497c00d6ebaee9aebdd392920b4d66095abda1f0d4407a00aec406035" Nov 28 10:15:26 crc kubenswrapper[4838]: E1128 10:15:26.530908 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59812f6497c00d6ebaee9aebdd392920b4d66095abda1f0d4407a00aec406035\": container with ID starting with 59812f6497c00d6ebaee9aebdd392920b4d66095abda1f0d4407a00aec406035 not found: ID does not exist" containerID="59812f6497c00d6ebaee9aebdd392920b4d66095abda1f0d4407a00aec406035" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.530942 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59812f6497c00d6ebaee9aebdd392920b4d66095abda1f0d4407a00aec406035"} err="failed to get container status \"59812f6497c00d6ebaee9aebdd392920b4d66095abda1f0d4407a00aec406035\": rpc error: code = NotFound desc = could not find container \"59812f6497c00d6ebaee9aebdd392920b4d66095abda1f0d4407a00aec406035\": container with ID starting with 59812f6497c00d6ebaee9aebdd392920b4d66095abda1f0d4407a00aec406035 not found: ID does not exist" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.530985 4838 scope.go:117] "RemoveContainer" containerID="aecdd9f722e278235b8102bcff27efbbb2ce5e4811df14678aca77ced5f8c97f" Nov 28 10:15:26 crc kubenswrapper[4838]: E1128 10:15:26.531567 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = 
NotFound desc = could not find container \"aecdd9f722e278235b8102bcff27efbbb2ce5e4811df14678aca77ced5f8c97f\": container with ID starting with aecdd9f722e278235b8102bcff27efbbb2ce5e4811df14678aca77ced5f8c97f not found: ID does not exist" containerID="aecdd9f722e278235b8102bcff27efbbb2ce5e4811df14678aca77ced5f8c97f" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.531619 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aecdd9f722e278235b8102bcff27efbbb2ce5e4811df14678aca77ced5f8c97f"} err="failed to get container status \"aecdd9f722e278235b8102bcff27efbbb2ce5e4811df14678aca77ced5f8c97f\": rpc error: code = NotFound desc = could not find container \"aecdd9f722e278235b8102bcff27efbbb2ce5e4811df14678aca77ced5f8c97f\": container with ID starting with aecdd9f722e278235b8102bcff27efbbb2ce5e4811df14678aca77ced5f8c97f not found: ID does not exist" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.573070 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb0df889-52d3-4150-87df-82886662e952" path="/var/lib/kubelet/pods/cb0df889-52d3-4150-87df-82886662e952/volumes" Nov 28 10:15:26 crc kubenswrapper[4838]: I1128 10:15:26.573740 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f31fe79b-92dd-4237-b387-9d2c825fdacb" path="/var/lib/kubelet/pods/f31fe79b-92dd-4237-b387-9d2c825fdacb/volumes" Nov 28 10:15:27 crc kubenswrapper[4838]: I1128 10:15:27.431585 4838 generic.go:334] "Generic (PLEG): container finished" podID="3dfeb40f-6bbe-4fea-96e9-83f3b4996c79" containerID="b6d686543b4e08665860e69d07ffa365af877f51803720a552be5713f55c528f" exitCode=0 Nov 28 10:15:27 crc kubenswrapper[4838]: I1128 10:15:27.431785 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" event={"ID":"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79","Type":"ContainerDied","Data":"b6d686543b4e08665860e69d07ffa365af877f51803720a552be5713f55c528f"} Nov 28 10:15:27 crc kubenswrapper[4838]: I1128 10:15:27.434885 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"dbe39b78-c198-480e-9bca-17eaed6183bf","Type":"ContainerStarted","Data":"4556fc74aa9f704938030bc686cf395afad6eac12531f3c48a0fbaeeaf2d8910"} Nov 28 10:15:27 crc kubenswrapper[4838]: I1128 10:15:27.437430 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-b7fh7" event={"ID":"77ac18d8-c660-4742-8367-281a06a82e37","Type":"ContainerStarted","Data":"5984aca8f5af7b102eea0a67c7c17086a00259b234024683c5a602f685203eed"} Nov 28 10:15:27 crc kubenswrapper[4838]: I1128 10:15:27.441077 4838 generic.go:334] "Generic (PLEG): container finished" podID="db94f019-727d-48df-a297-7007e4133cf6" containerID="f3d207828f4e088114c9b3e3082ff4d147b1a1c27cdac5ba1fb1078f696c631a" exitCode=0 Nov 28 10:15:27 crc kubenswrapper[4838]: I1128 10:15:27.441191 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" event={"ID":"db94f019-727d-48df-a297-7007e4133cf6","Type":"ContainerDied","Data":"f3d207828f4e088114c9b3e3082ff4d147b1a1c27cdac5ba1fb1078f696c631a"} Nov 28 10:15:27 crc kubenswrapper[4838]: I1128 10:15:27.442361 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"d62996a9-1816-49c7-9280-f115770a83ad","Type":"ContainerStarted","Data":"a37600cf2161a43f11fe6d33a6be1cbf6519cc9697601fb9edcda333034c0eca"} Nov 28 10:15:27 crc kubenswrapper[4838]: I1128 10:15:27.498280 4838 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-b7fh7" podStartSLOduration=3.498260766 podStartE2EDuration="3.498260766s" podCreationTimestamp="2025-11-28 10:15:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:15:27.497153367 +0000 UTC m=+1099.196127547" watchObservedRunningTime="2025-11-28 10:15:27.498260766 +0000 UTC m=+1099.197234946"
Nov 28 10:15:28 crc kubenswrapper[4838]: I1128 10:15:28.454156    4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" event={"ID":"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79","Type":"ContainerStarted","Data":"f1013d6748c0587d238275c145b677197ab3945c896eb3c22870af418aca490c"}
Nov 28 10:15:28 crc kubenswrapper[4838]: I1128 10:15:28.454541    4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2"
Nov 28 10:15:28 crc kubenswrapper[4838]: I1128 10:15:28.456252    4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" event={"ID":"db94f019-727d-48df-a297-7007e4133cf6","Type":"ContainerStarted","Data":"11d211432847e858379127214f174f46122922ea4f842948fdc12c095cb853b6"}
Nov 28 10:15:28 crc kubenswrapper[4838]: I1128 10:15:28.456365    4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-r85mc"
Nov 28 10:15:28 crc kubenswrapper[4838]: I1128 10:15:28.458104    4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"d62996a9-1816-49c7-9280-f115770a83ad","Type":"ContainerStarted","Data":"2533026dd9b3e6fc766f1021475b2527a272bc7fcd22e628cf53d8b451b4f6a9"}
Nov 28 10:15:28 crc kubenswrapper[4838]: I1128 10:15:28.458140    4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"d62996a9-1816-49c7-9280-f115770a83ad","Type":"ContainerStarted","Data":"05514dccd78a869eb8209514a851b22453eb6197d30085b420ac009966c27250"}
Nov 28 10:15:28 crc kubenswrapper[4838]: I1128 10:15:28.458301    4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0"
Nov 28 10:15:28 crc kubenswrapper[4838]: I1128 10:15:28.479311    4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" podStartSLOduration=4.479294612 podStartE2EDuration="4.479294612s" podCreationTimestamp="2025-11-28 10:15:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:15:28.471974225 +0000 UTC m=+1100.170948395" watchObservedRunningTime="2025-11-28 10:15:28.479294612 +0000 UTC m=+1100.178268782"
Nov 28 10:15:28 crc kubenswrapper[4838]: I1128 10:15:28.500413    4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" podStartSLOduration=4.500392736 podStartE2EDuration="4.500392736s" podCreationTimestamp="2025-11-28 10:15:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:15:28.498009122 +0000 UTC m=+1100.196983292" watchObservedRunningTime="2025-11-28 10:15:28.500392736 +0000 UTC m=+1100.199366906"
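Three of the "Observed pod startup duration" records above have firstStartedPulling and lastFinishedPulling set to Go's zero time value ("0001-01-01 00:00:00 +0000 UTC"): the images were already present on the node, no pull was recorded, and podStartSLOduration equals podStartE2EDuration (e.g. 3.498260766s for ovn-controller-metrics-b7fh7). For ovn-northd-0, in the first record below, a real pull window is subtracted. A short sketch covering both cases (plain Python, assumed helper, not kubelet source):

    GO_ZERO_TIME = "0001-01-01 00:00:00 +0000 UTC"  # Go time.Time zero value

    def slo_duration(e2e_s, first_pull_ts, last_pull_ts, pull_offsets=None):
        # No pull recorded: the SLO duration is the full E2E duration.
        if first_pull_ts == GO_ZERO_TIME and last_pull_ts == GO_ZERO_TIME:
            return e2e_s
        first_m, last_m = pull_offsets  # m=+ offsets, seconds since kubelet start
        return e2e_s - (last_m - first_m)

    assert slo_duration(3.498260766, GO_ZERO_TIME, GO_ZERO_TIME) == 3.498260766
    # ovn-northd-0: 3.520182965 s minus a 1.368685310 s pull window
    print(f"{slo_duration(3.520182965, '', '', (1098.154113520, 1099.522798830)):.9f}")
    # -> 2.151497655, matching the record below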
Nov 28 10:15:28 crc kubenswrapper[4838]: I1128 10:15:28.520203    4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.151497655 podStartE2EDuration="3.520182965s" podCreationTimestamp="2025-11-28 10:15:25 +0000 UTC" firstStartedPulling="2025-11-28 10:15:26.45513935 +0000 UTC m=+1098.154113520" lastFinishedPulling="2025-11-28 10:15:27.82382466 +0000 UTC m=+1099.522798830" observedRunningTime="2025-11-28 10:15:28.514227406 +0000 UTC m=+1100.213201576" watchObservedRunningTime="2025-11-28 10:15:28.520182965 +0000 UTC m=+1100.219157145"
Nov 28 10:15:29 crc kubenswrapper[4838]: I1128 10:15:29.518382    4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Nov 28 10:15:29 crc kubenswrapper[4838]: I1128 10:15:29.518696    4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"
Nov 28 10:15:29 crc kubenswrapper[4838]: I1128 10:15:29.622742    4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0"
Nov 28 10:15:30 crc kubenswrapper[4838]: I1128 10:15:30.594707    4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0"
Nov 28 10:15:30 crc kubenswrapper[4838]: I1128 10:15:30.838016    4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0"
Nov 28 10:15:30 crc kubenswrapper[4838]: I1128 10:15:30.838075    4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0"
Nov 28 10:15:30 crc kubenswrapper[4838]: I1128 10:15:30.967035    4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-4158-account-create-update-qzc2x"]
Nov 28 10:15:30 crc kubenswrapper[4838]: E1128 10:15:30.968040    4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb0df889-52d3-4150-87df-82886662e952" containerName="init"
Nov 28 10:15:30 crc kubenswrapper[4838]: I1128 10:15:30.968093    4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb0df889-52d3-4150-87df-82886662e952" containerName="init"
Nov 28 10:15:30 crc kubenswrapper[4838]: E1128 10:15:30.968117    4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f31fe79b-92dd-4237-b387-9d2c825fdacb" containerName="dnsmasq-dns"
Nov 28 10:15:30 crc kubenswrapper[4838]: I1128 10:15:30.968131    4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="f31fe79b-92dd-4237-b387-9d2c825fdacb" containerName="dnsmasq-dns"
Nov 28 10:15:30 crc kubenswrapper[4838]: E1128 10:15:30.968164    4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb0df889-52d3-4150-87df-82886662e952" containerName="dnsmasq-dns"
Nov 28 10:15:30 crc kubenswrapper[4838]: I1128 10:15:30.968177    4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb0df889-52d3-4150-87df-82886662e952" containerName="dnsmasq-dns"
Nov 28 10:15:30 crc kubenswrapper[4838]: E1128 10:15:30.968200    4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f31fe79b-92dd-4237-b387-9d2c825fdacb" containerName="init"
Nov 28 10:15:30 crc kubenswrapper[4838]: I1128 10:15:30.968212    4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="f31fe79b-92dd-4237-b387-9d2c825fdacb" containerName="init"
Nov 28 10:15:30 crc kubenswrapper[4838]: I1128 10:15:30.968555    4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb0df889-52d3-4150-87df-82886662e952" containerName="dnsmasq-dns"
Nov 28 10:15:30 crc kubenswrapper[4838]: I1128 10:15:30.968619    4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="f31fe79b-92dd-4237-b387-9d2c825fdacb"
containerName="dnsmasq-dns" Nov 28 10:15:30 crc kubenswrapper[4838]: I1128 10:15:30.969771 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-4158-account-create-update-qzc2x" Nov 28 10:15:30 crc kubenswrapper[4838]: I1128 10:15:30.971879 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 28 10:15:30 crc kubenswrapper[4838]: I1128 10:15:30.975128 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-4158-account-create-update-qzc2x"] Nov 28 10:15:30 crc kubenswrapper[4838]: I1128 10:15:30.993529 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 28 10:15:31 crc kubenswrapper[4838]: I1128 10:15:31.031456 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-bhznq"] Nov 28 10:15:31 crc kubenswrapper[4838]: I1128 10:15:31.032879 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-bhznq" Nov 28 10:15:31 crc kubenswrapper[4838]: I1128 10:15:31.055198 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-bhznq"] Nov 28 10:15:31 crc kubenswrapper[4838]: I1128 10:15:31.095397 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e2876f4-8898-4b65-b2e2-98eb5635ddd6-operator-scripts\") pod \"placement-4158-account-create-update-qzc2x\" (UID: \"1e2876f4-8898-4b65-b2e2-98eb5635ddd6\") " pod="openstack/placement-4158-account-create-update-qzc2x" Nov 28 10:15:31 crc kubenswrapper[4838]: I1128 10:15:31.095450 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nk9rl\" (UniqueName: \"kubernetes.io/projected/1e2876f4-8898-4b65-b2e2-98eb5635ddd6-kube-api-access-nk9rl\") pod \"placement-4158-account-create-update-qzc2x\" (UID: \"1e2876f4-8898-4b65-b2e2-98eb5635ddd6\") " pod="openstack/placement-4158-account-create-update-qzc2x" Nov 28 10:15:31 crc kubenswrapper[4838]: I1128 10:15:31.197238 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8b5tm\" (UniqueName: \"kubernetes.io/projected/5ed1c826-381e-40f2-bf20-470d6ceca80d-kube-api-access-8b5tm\") pod \"placement-db-create-bhznq\" (UID: \"5ed1c826-381e-40f2-bf20-470d6ceca80d\") " pod="openstack/placement-db-create-bhznq" Nov 28 10:15:31 crc kubenswrapper[4838]: I1128 10:15:31.197287 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5ed1c826-381e-40f2-bf20-470d6ceca80d-operator-scripts\") pod \"placement-db-create-bhznq\" (UID: \"5ed1c826-381e-40f2-bf20-470d6ceca80d\") " pod="openstack/placement-db-create-bhznq" Nov 28 10:15:31 crc kubenswrapper[4838]: I1128 10:15:31.197458 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e2876f4-8898-4b65-b2e2-98eb5635ddd6-operator-scripts\") pod \"placement-4158-account-create-update-qzc2x\" (UID: \"1e2876f4-8898-4b65-b2e2-98eb5635ddd6\") " pod="openstack/placement-4158-account-create-update-qzc2x" Nov 28 10:15:31 crc kubenswrapper[4838]: I1128 10:15:31.197503 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nk9rl\" 
(UniqueName: \"kubernetes.io/projected/1e2876f4-8898-4b65-b2e2-98eb5635ddd6-kube-api-access-nk9rl\") pod \"placement-4158-account-create-update-qzc2x\" (UID: \"1e2876f4-8898-4b65-b2e2-98eb5635ddd6\") " pod="openstack/placement-4158-account-create-update-qzc2x" Nov 28 10:15:31 crc kubenswrapper[4838]: I1128 10:15:31.198841 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e2876f4-8898-4b65-b2e2-98eb5635ddd6-operator-scripts\") pod \"placement-4158-account-create-update-qzc2x\" (UID: \"1e2876f4-8898-4b65-b2e2-98eb5635ddd6\") " pod="openstack/placement-4158-account-create-update-qzc2x" Nov 28 10:15:31 crc kubenswrapper[4838]: I1128 10:15:31.220455 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nk9rl\" (UniqueName: \"kubernetes.io/projected/1e2876f4-8898-4b65-b2e2-98eb5635ddd6-kube-api-access-nk9rl\") pod \"placement-4158-account-create-update-qzc2x\" (UID: \"1e2876f4-8898-4b65-b2e2-98eb5635ddd6\") " pod="openstack/placement-4158-account-create-update-qzc2x" Nov 28 10:15:31 crc kubenswrapper[4838]: I1128 10:15:31.299005 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8b5tm\" (UniqueName: \"kubernetes.io/projected/5ed1c826-381e-40f2-bf20-470d6ceca80d-kube-api-access-8b5tm\") pod \"placement-db-create-bhznq\" (UID: \"5ed1c826-381e-40f2-bf20-470d6ceca80d\") " pod="openstack/placement-db-create-bhznq" Nov 28 10:15:31 crc kubenswrapper[4838]: I1128 10:15:31.299100 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5ed1c826-381e-40f2-bf20-470d6ceca80d-operator-scripts\") pod \"placement-db-create-bhznq\" (UID: \"5ed1c826-381e-40f2-bf20-470d6ceca80d\") " pod="openstack/placement-db-create-bhznq" Nov 28 10:15:31 crc kubenswrapper[4838]: I1128 10:15:31.299835 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5ed1c826-381e-40f2-bf20-470d6ceca80d-operator-scripts\") pod \"placement-db-create-bhznq\" (UID: \"5ed1c826-381e-40f2-bf20-470d6ceca80d\") " pod="openstack/placement-db-create-bhznq" Nov 28 10:15:31 crc kubenswrapper[4838]: I1128 10:15:31.311655 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-4158-account-create-update-qzc2x" Nov 28 10:15:31 crc kubenswrapper[4838]: I1128 10:15:31.322508 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8b5tm\" (UniqueName: \"kubernetes.io/projected/5ed1c826-381e-40f2-bf20-470d6ceca80d-kube-api-access-8b5tm\") pod \"placement-db-create-bhznq\" (UID: \"5ed1c826-381e-40f2-bf20-470d6ceca80d\") " pod="openstack/placement-db-create-bhznq" Nov 28 10:15:31 crc kubenswrapper[4838]: I1128 10:15:31.353505 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-bhznq" Nov 28 10:15:31 crc kubenswrapper[4838]: I1128 10:15:31.581697 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 28 10:15:31 crc kubenswrapper[4838]: I1128 10:15:31.845573 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-4158-account-create-update-qzc2x"] Nov 28 10:15:31 crc kubenswrapper[4838]: I1128 10:15:31.938240 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-bhznq"] Nov 28 10:15:31 crc kubenswrapper[4838]: W1128 10:15:31.945136 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5ed1c826_381e_40f2_bf20_470d6ceca80d.slice/crio-501d99033d95c2d6f5bad65a5f4ad10108df5df1f1b9407af068810bf74e4a07 WatchSource:0}: Error finding container 501d99033d95c2d6f5bad65a5f4ad10108df5df1f1b9407af068810bf74e4a07: Status 404 returned error can't find the container with id 501d99033d95c2d6f5bad65a5f4ad10108df5df1f1b9407af068810bf74e4a07 Nov 28 10:15:32 crc kubenswrapper[4838]: I1128 10:15:32.501268 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-4158-account-create-update-qzc2x" event={"ID":"1e2876f4-8898-4b65-b2e2-98eb5635ddd6","Type":"ContainerStarted","Data":"ac8e37be0b5e03f7348a3aa2a55c18240a23b542e0614f054771d1c6ffd021b8"} Nov 28 10:15:32 crc kubenswrapper[4838]: I1128 10:15:32.501430 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-4158-account-create-update-qzc2x" event={"ID":"1e2876f4-8898-4b65-b2e2-98eb5635ddd6","Type":"ContainerStarted","Data":"47f4130eb844f85243fb29ce163e1866774af66e8a46241818fa5345242cca2a"} Nov 28 10:15:32 crc kubenswrapper[4838]: I1128 10:15:32.504417 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-bhznq" event={"ID":"5ed1c826-381e-40f2-bf20-470d6ceca80d","Type":"ContainerStarted","Data":"501d99033d95c2d6f5bad65a5f4ad10108df5df1f1b9407af068810bf74e4a07"} Nov 28 10:15:33 crc kubenswrapper[4838]: I1128 10:15:33.513353 4838 generic.go:334] "Generic (PLEG): container finished" podID="1e2876f4-8898-4b65-b2e2-98eb5635ddd6" containerID="ac8e37be0b5e03f7348a3aa2a55c18240a23b542e0614f054771d1c6ffd021b8" exitCode=0 Nov 28 10:15:33 crc kubenswrapper[4838]: I1128 10:15:33.513503 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-4158-account-create-update-qzc2x" event={"ID":"1e2876f4-8898-4b65-b2e2-98eb5635ddd6","Type":"ContainerDied","Data":"ac8e37be0b5e03f7348a3aa2a55c18240a23b542e0614f054771d1c6ffd021b8"} Nov 28 10:15:33 crc kubenswrapper[4838]: I1128 10:15:33.517291 4838 generic.go:334] "Generic (PLEG): container finished" podID="5ed1c826-381e-40f2-bf20-470d6ceca80d" containerID="fcee5ec30121bc8f1755560539788ffa4888e2ba8580cfcf6d4dcd0acb0f6b55" exitCode=0 Nov 28 10:15:33 crc kubenswrapper[4838]: I1128 10:15:33.517334 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-bhznq" event={"ID":"5ed1c826-381e-40f2-bf20-470d6ceca80d","Type":"ContainerDied","Data":"fcee5ec30121bc8f1755560539788ffa4888e2ba8580cfcf6d4dcd0acb0f6b55"} Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.035236 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-4158-account-create-update-qzc2x" Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.043894 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-bhznq" Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.176975 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8b5tm\" (UniqueName: \"kubernetes.io/projected/5ed1c826-381e-40f2-bf20-470d6ceca80d-kube-api-access-8b5tm\") pod \"5ed1c826-381e-40f2-bf20-470d6ceca80d\" (UID: \"5ed1c826-381e-40f2-bf20-470d6ceca80d\") " Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.177245 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5ed1c826-381e-40f2-bf20-470d6ceca80d-operator-scripts\") pod \"5ed1c826-381e-40f2-bf20-470d6ceca80d\" (UID: \"5ed1c826-381e-40f2-bf20-470d6ceca80d\") " Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.177372 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nk9rl\" (UniqueName: \"kubernetes.io/projected/1e2876f4-8898-4b65-b2e2-98eb5635ddd6-kube-api-access-nk9rl\") pod \"1e2876f4-8898-4b65-b2e2-98eb5635ddd6\" (UID: \"1e2876f4-8898-4b65-b2e2-98eb5635ddd6\") " Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.177494 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e2876f4-8898-4b65-b2e2-98eb5635ddd6-operator-scripts\") pod \"1e2876f4-8898-4b65-b2e2-98eb5635ddd6\" (UID: \"1e2876f4-8898-4b65-b2e2-98eb5635ddd6\") " Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.179181 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e2876f4-8898-4b65-b2e2-98eb5635ddd6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1e2876f4-8898-4b65-b2e2-98eb5635ddd6" (UID: "1e2876f4-8898-4b65-b2e2-98eb5635ddd6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.179687 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ed1c826-381e-40f2-bf20-470d6ceca80d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5ed1c826-381e-40f2-bf20-470d6ceca80d" (UID: "5ed1c826-381e-40f2-bf20-470d6ceca80d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.184664 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ed1c826-381e-40f2-bf20-470d6ceca80d-kube-api-access-8b5tm" (OuterVolumeSpecName: "kube-api-access-8b5tm") pod "5ed1c826-381e-40f2-bf20-470d6ceca80d" (UID: "5ed1c826-381e-40f2-bf20-470d6ceca80d"). InnerVolumeSpecName "kube-api-access-8b5tm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.188907 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e2876f4-8898-4b65-b2e2-98eb5635ddd6-kube-api-access-nk9rl" (OuterVolumeSpecName: "kube-api-access-nk9rl") pod "1e2876f4-8898-4b65-b2e2-98eb5635ddd6" (UID: "1e2876f4-8898-4b65-b2e2-98eb5635ddd6"). InnerVolumeSpecName "kube-api-access-nk9rl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.263939 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.279703 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8b5tm\" (UniqueName: \"kubernetes.io/projected/5ed1c826-381e-40f2-bf20-470d6ceca80d-kube-api-access-8b5tm\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.279803 4838 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5ed1c826-381e-40f2-bf20-470d6ceca80d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.279829 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nk9rl\" (UniqueName: \"kubernetes.io/projected/1e2876f4-8898-4b65-b2e2-98eb5635ddd6-kube-api-access-nk9rl\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.279854 4838 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1e2876f4-8898-4b65-b2e2-98eb5635ddd6-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.314932 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.393328 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-2l8n2"] Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.537354 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-bhznq" event={"ID":"5ed1c826-381e-40f2-bf20-470d6ceca80d","Type":"ContainerDied","Data":"501d99033d95c2d6f5bad65a5f4ad10108df5df1f1b9407af068810bf74e4a07"} Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.537394 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-bhznq" Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.537397 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="501d99033d95c2d6f5bad65a5f4ad10108df5df1f1b9407af068810bf74e4a07" Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.539467 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-4158-account-create-update-qzc2x" event={"ID":"1e2876f4-8898-4b65-b2e2-98eb5635ddd6","Type":"ContainerDied","Data":"47f4130eb844f85243fb29ce163e1866774af66e8a46241818fa5345242cca2a"} Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.539511 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="47f4130eb844f85243fb29ce163e1866774af66e8a46241818fa5345242cca2a" Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.539629 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" podUID="3dfeb40f-6bbe-4fea-96e9-83f3b4996c79" containerName="dnsmasq-dns" containerID="cri-o://f1013d6748c0587d238275c145b677197ab3945c896eb3c22870af418aca490c" gracePeriod=10 Nov 28 10:15:35 crc kubenswrapper[4838]: I1128 10:15:35.539659 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-4158-account-create-update-qzc2x" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.020919 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.193105 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-config\") pod \"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79\" (UID: \"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79\") " Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.193193 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-dns-svc\") pod \"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79\" (UID: \"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79\") " Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.193302 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nls7h\" (UniqueName: \"kubernetes.io/projected/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-kube-api-access-nls7h\") pod \"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79\" (UID: \"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79\") " Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.193358 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-ovsdbserver-nb\") pod \"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79\" (UID: \"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79\") " Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.208378 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-g727w"] Nov 28 10:15:36 crc kubenswrapper[4838]: E1128 10:15:36.208895 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dfeb40f-6bbe-4fea-96e9-83f3b4996c79" containerName="init" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.209017 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dfeb40f-6bbe-4fea-96e9-83f3b4996c79" containerName="init" Nov 28 10:15:36 crc kubenswrapper[4838]: E1128 10:15:36.209097 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ed1c826-381e-40f2-bf20-470d6ceca80d" containerName="mariadb-database-create" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.209176 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ed1c826-381e-40f2-bf20-470d6ceca80d" containerName="mariadb-database-create" Nov 28 10:15:36 crc kubenswrapper[4838]: E1128 10:15:36.209258 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dfeb40f-6bbe-4fea-96e9-83f3b4996c79" containerName="dnsmasq-dns" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.209328 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dfeb40f-6bbe-4fea-96e9-83f3b4996c79" containerName="dnsmasq-dns" Nov 28 10:15:36 crc kubenswrapper[4838]: E1128 10:15:36.209406 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e2876f4-8898-4b65-b2e2-98eb5635ddd6" containerName="mariadb-account-create-update" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.209512 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e2876f4-8898-4b65-b2e2-98eb5635ddd6" containerName="mariadb-account-create-update" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.209793 4838 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="5ed1c826-381e-40f2-bf20-470d6ceca80d" containerName="mariadb-database-create" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.209890 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="3dfeb40f-6bbe-4fea-96e9-83f3b4996c79" containerName="dnsmasq-dns" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.209962 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e2876f4-8898-4b65-b2e2-98eb5635ddd6" containerName="mariadb-account-create-update" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.210571 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-g727w" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.212745 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-kube-api-access-nls7h" (OuterVolumeSpecName: "kube-api-access-nls7h") pod "3dfeb40f-6bbe-4fea-96e9-83f3b4996c79" (UID: "3dfeb40f-6bbe-4fea-96e9-83f3b4996c79"). InnerVolumeSpecName "kube-api-access-nls7h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.230778 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-g727w"] Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.240461 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3dfeb40f-6bbe-4fea-96e9-83f3b4996c79" (UID: "3dfeb40f-6bbe-4fea-96e9-83f3b4996c79"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.243227 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-config" (OuterVolumeSpecName: "config") pod "3dfeb40f-6bbe-4fea-96e9-83f3b4996c79" (UID: "3dfeb40f-6bbe-4fea-96e9-83f3b4996c79"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.253280 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3dfeb40f-6bbe-4fea-96e9-83f3b4996c79" (UID: "3dfeb40f-6bbe-4fea-96e9-83f3b4996c79"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.296065 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nls7h\" (UniqueName: \"kubernetes.io/projected/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-kube-api-access-nls7h\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.296106 4838 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.296120 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.296132 4838 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.311364 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-a5d1-account-create-update-2m49r"] Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.312446 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-a5d1-account-create-update-2m49r" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.316677 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.317170 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-a5d1-account-create-update-2m49r"] Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.398414 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e-operator-scripts\") pod \"glance-db-create-g727w\" (UID: \"a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e\") " pod="openstack/glance-db-create-g727w" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.398517 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-987bx\" (UniqueName: \"kubernetes.io/projected/a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e-kube-api-access-987bx\") pod \"glance-db-create-g727w\" (UID: \"a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e\") " pod="openstack/glance-db-create-g727w" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.499805 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-987bx\" (UniqueName: \"kubernetes.io/projected/a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e-kube-api-access-987bx\") pod \"glance-db-create-g727w\" (UID: \"a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e\") " pod="openstack/glance-db-create-g727w" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.499852 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b120d6f5-29b7-4dc8-9457-e908a8e4d6f0-operator-scripts\") pod \"glance-a5d1-account-create-update-2m49r\" (UID: \"b120d6f5-29b7-4dc8-9457-e908a8e4d6f0\") " pod="openstack/glance-a5d1-account-create-update-2m49r" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.499903 4838 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lld9g\" (UniqueName: \"kubernetes.io/projected/b120d6f5-29b7-4dc8-9457-e908a8e4d6f0-kube-api-access-lld9g\") pod \"glance-a5d1-account-create-update-2m49r\" (UID: \"b120d6f5-29b7-4dc8-9457-e908a8e4d6f0\") " pod="openstack/glance-a5d1-account-create-update-2m49r" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.499937 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e-operator-scripts\") pod \"glance-db-create-g727w\" (UID: \"a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e\") " pod="openstack/glance-db-create-g727w" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.500579 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e-operator-scripts\") pod \"glance-db-create-g727w\" (UID: \"a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e\") " pod="openstack/glance-db-create-g727w" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.517578 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-987bx\" (UniqueName: \"kubernetes.io/projected/a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e-kube-api-access-987bx\") pod \"glance-db-create-g727w\" (UID: \"a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e\") " pod="openstack/glance-db-create-g727w" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.550656 4838 generic.go:334] "Generic (PLEG): container finished" podID="3dfeb40f-6bbe-4fea-96e9-83f3b4996c79" containerID="f1013d6748c0587d238275c145b677197ab3945c896eb3c22870af418aca490c" exitCode=0 Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.550907 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" event={"ID":"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79","Type":"ContainerDied","Data":"f1013d6748c0587d238275c145b677197ab3945c896eb3c22870af418aca490c"} Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.550994 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" event={"ID":"3dfeb40f-6bbe-4fea-96e9-83f3b4996c79","Type":"ContainerDied","Data":"0cfab2365ad078099727b216b5e80ed907a838d6cc889e431e48dc112b933d9c"} Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.551028 4838 scope.go:117] "RemoveContainer" containerID="f1013d6748c0587d238275c145b677197ab3945c896eb3c22870af418aca490c" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.550938 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-2l8n2" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.571531 4838 scope.go:117] "RemoveContainer" containerID="b6d686543b4e08665860e69d07ffa365af877f51803720a552be5713f55c528f" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.574961 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-g727w" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.600151 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-2l8n2"] Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.601131 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lld9g\" (UniqueName: \"kubernetes.io/projected/b120d6f5-29b7-4dc8-9457-e908a8e4d6f0-kube-api-access-lld9g\") pod \"glance-a5d1-account-create-update-2m49r\" (UID: \"b120d6f5-29b7-4dc8-9457-e908a8e4d6f0\") " pod="openstack/glance-a5d1-account-create-update-2m49r" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.601318 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b120d6f5-29b7-4dc8-9457-e908a8e4d6f0-operator-scripts\") pod \"glance-a5d1-account-create-update-2m49r\" (UID: \"b120d6f5-29b7-4dc8-9457-e908a8e4d6f0\") " pod="openstack/glance-a5d1-account-create-update-2m49r" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.602428 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b120d6f5-29b7-4dc8-9457-e908a8e4d6f0-operator-scripts\") pod \"glance-a5d1-account-create-update-2m49r\" (UID: \"b120d6f5-29b7-4dc8-9457-e908a8e4d6f0\") " pod="openstack/glance-a5d1-account-create-update-2m49r" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.609088 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-2l8n2"] Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.624754 4838 scope.go:117] "RemoveContainer" containerID="f1013d6748c0587d238275c145b677197ab3945c896eb3c22870af418aca490c" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.628204 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lld9g\" (UniqueName: \"kubernetes.io/projected/b120d6f5-29b7-4dc8-9457-e908a8e4d6f0-kube-api-access-lld9g\") pod \"glance-a5d1-account-create-update-2m49r\" (UID: \"b120d6f5-29b7-4dc8-9457-e908a8e4d6f0\") " pod="openstack/glance-a5d1-account-create-update-2m49r" Nov 28 10:15:36 crc kubenswrapper[4838]: E1128 10:15:36.628308 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1013d6748c0587d238275c145b677197ab3945c896eb3c22870af418aca490c\": container with ID starting with f1013d6748c0587d238275c145b677197ab3945c896eb3c22870af418aca490c not found: ID does not exist" containerID="f1013d6748c0587d238275c145b677197ab3945c896eb3c22870af418aca490c" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.628417 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1013d6748c0587d238275c145b677197ab3945c896eb3c22870af418aca490c"} err="failed to get container status \"f1013d6748c0587d238275c145b677197ab3945c896eb3c22870af418aca490c\": rpc error: code = NotFound desc = could not find container \"f1013d6748c0587d238275c145b677197ab3945c896eb3c22870af418aca490c\": container with ID starting with f1013d6748c0587d238275c145b677197ab3945c896eb3c22870af418aca490c not found: ID does not exist" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.628499 4838 scope.go:117] "RemoveContainer" containerID="b6d686543b4e08665860e69d07ffa365af877f51803720a552be5713f55c528f" Nov 28 10:15:36 crc kubenswrapper[4838]: E1128 10:15:36.628907 4838 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6d686543b4e08665860e69d07ffa365af877f51803720a552be5713f55c528f\": container with ID starting with b6d686543b4e08665860e69d07ffa365af877f51803720a552be5713f55c528f not found: ID does not exist" containerID="b6d686543b4e08665860e69d07ffa365af877f51803720a552be5713f55c528f" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.628940 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6d686543b4e08665860e69d07ffa365af877f51803720a552be5713f55c528f"} err="failed to get container status \"b6d686543b4e08665860e69d07ffa365af877f51803720a552be5713f55c528f\": rpc error: code = NotFound desc = could not find container \"b6d686543b4e08665860e69d07ffa365af877f51803720a552be5713f55c528f\": container with ID starting with b6d686543b4e08665860e69d07ffa365af877f51803720a552be5713f55c528f not found: ID does not exist" Nov 28 10:15:36 crc kubenswrapper[4838]: I1128 10:15:36.935767 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-a5d1-account-create-update-2m49r" Nov 28 10:15:37 crc kubenswrapper[4838]: I1128 10:15:37.060362 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-g727w"] Nov 28 10:15:37 crc kubenswrapper[4838]: W1128 10:15:37.066069 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda37aacc0_2d3e_4a20_abd2_3f0bcd87ab7e.slice/crio-4421d26ebf90ecd50526038ee0e61ee81b563dde489dbb0df8a494c9cc0ad70f WatchSource:0}: Error finding container 4421d26ebf90ecd50526038ee0e61ee81b563dde489dbb0df8a494c9cc0ad70f: Status 404 returned error can't find the container with id 4421d26ebf90ecd50526038ee0e61ee81b563dde489dbb0df8a494c9cc0ad70f Nov 28 10:15:37 crc kubenswrapper[4838]: I1128 10:15:37.172039 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-a5d1-account-create-update-2m49r"] Nov 28 10:15:37 crc kubenswrapper[4838]: I1128 10:15:37.562837 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-a5d1-account-create-update-2m49r" event={"ID":"b120d6f5-29b7-4dc8-9457-e908a8e4d6f0","Type":"ContainerStarted","Data":"4bf2128f3ad15b6773d65a327d640c863021c7b8f24f3148d2ea40b9bbabb89a"} Nov 28 10:15:37 crc kubenswrapper[4838]: I1128 10:15:37.564542 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-g727w" event={"ID":"a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e","Type":"ContainerStarted","Data":"4421d26ebf90ecd50526038ee0e61ee81b563dde489dbb0df8a494c9cc0ad70f"} Nov 28 10:15:38 crc kubenswrapper[4838]: I1128 10:15:38.582239 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3dfeb40f-6bbe-4fea-96e9-83f3b4996c79" path="/var/lib/kubelet/pods/3dfeb40f-6bbe-4fea-96e9-83f3b4996c79/volumes" Nov 28 10:15:40 crc kubenswrapper[4838]: I1128 10:15:40.642562 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-ph2vv"] Nov 28 10:15:40 crc kubenswrapper[4838]: I1128 10:15:40.644595 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-ph2vv" Nov 28 10:15:40 crc kubenswrapper[4838]: I1128 10:15:40.663083 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-ph2vv"] Nov 28 10:15:40 crc kubenswrapper[4838]: I1128 10:15:40.736159 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-6815-account-create-update-kvdfv"] Nov 28 10:15:40 crc kubenswrapper[4838]: I1128 10:15:40.737513 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6815-account-create-update-kvdfv" Nov 28 10:15:40 crc kubenswrapper[4838]: I1128 10:15:40.740999 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 28 10:15:40 crc kubenswrapper[4838]: I1128 10:15:40.754767 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6815-account-create-update-kvdfv"] Nov 28 10:15:40 crc kubenswrapper[4838]: I1128 10:15:40.802434 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8769e7d0-1a95-40da-b0e7-91cbc81f94e1-operator-scripts\") pod \"keystone-db-create-ph2vv\" (UID: \"8769e7d0-1a95-40da-b0e7-91cbc81f94e1\") " pod="openstack/keystone-db-create-ph2vv" Nov 28 10:15:40 crc kubenswrapper[4838]: I1128 10:15:40.802553 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2b9c7\" (UniqueName: \"kubernetes.io/projected/8769e7d0-1a95-40da-b0e7-91cbc81f94e1-kube-api-access-2b9c7\") pod \"keystone-db-create-ph2vv\" (UID: \"8769e7d0-1a95-40da-b0e7-91cbc81f94e1\") " pod="openstack/keystone-db-create-ph2vv" Nov 28 10:15:40 crc kubenswrapper[4838]: I1128 10:15:40.903749 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2b9c7\" (UniqueName: \"kubernetes.io/projected/8769e7d0-1a95-40da-b0e7-91cbc81f94e1-kube-api-access-2b9c7\") pod \"keystone-db-create-ph2vv\" (UID: \"8769e7d0-1a95-40da-b0e7-91cbc81f94e1\") " pod="openstack/keystone-db-create-ph2vv" Nov 28 10:15:40 crc kubenswrapper[4838]: I1128 10:15:40.903840 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8f0c6e8c-ce32-4db4-b903-d56ec1fec884-operator-scripts\") pod \"keystone-6815-account-create-update-kvdfv\" (UID: \"8f0c6e8c-ce32-4db4-b903-d56ec1fec884\") " pod="openstack/keystone-6815-account-create-update-kvdfv" Nov 28 10:15:40 crc kubenswrapper[4838]: I1128 10:15:40.903861 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p49p4\" (UniqueName: \"kubernetes.io/projected/8f0c6e8c-ce32-4db4-b903-d56ec1fec884-kube-api-access-p49p4\") pod \"keystone-6815-account-create-update-kvdfv\" (UID: \"8f0c6e8c-ce32-4db4-b903-d56ec1fec884\") " pod="openstack/keystone-6815-account-create-update-kvdfv" Nov 28 10:15:40 crc kubenswrapper[4838]: I1128 10:15:40.903904 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8769e7d0-1a95-40da-b0e7-91cbc81f94e1-operator-scripts\") pod \"keystone-db-create-ph2vv\" (UID: \"8769e7d0-1a95-40da-b0e7-91cbc81f94e1\") " pod="openstack/keystone-db-create-ph2vv" Nov 28 10:15:40 crc kubenswrapper[4838]: I1128 10:15:40.904565 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8769e7d0-1a95-40da-b0e7-91cbc81f94e1-operator-scripts\") pod \"keystone-db-create-ph2vv\" (UID: \"8769e7d0-1a95-40da-b0e7-91cbc81f94e1\") " pod="openstack/keystone-db-create-ph2vv" Nov 28 10:15:40 crc kubenswrapper[4838]: I1128 10:15:40.923511 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2b9c7\" (UniqueName: \"kubernetes.io/projected/8769e7d0-1a95-40da-b0e7-91cbc81f94e1-kube-api-access-2b9c7\") pod \"keystone-db-create-ph2vv\" (UID: \"8769e7d0-1a95-40da-b0e7-91cbc81f94e1\") " pod="openstack/keystone-db-create-ph2vv" Nov 28 10:15:40 crc kubenswrapper[4838]: I1128 10:15:40.984413 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-ph2vv" Nov 28 10:15:41 crc kubenswrapper[4838]: I1128 10:15:41.005859 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8f0c6e8c-ce32-4db4-b903-d56ec1fec884-operator-scripts\") pod \"keystone-6815-account-create-update-kvdfv\" (UID: \"8f0c6e8c-ce32-4db4-b903-d56ec1fec884\") " pod="openstack/keystone-6815-account-create-update-kvdfv" Nov 28 10:15:41 crc kubenswrapper[4838]: I1128 10:15:41.005914 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p49p4\" (UniqueName: \"kubernetes.io/projected/8f0c6e8c-ce32-4db4-b903-d56ec1fec884-kube-api-access-p49p4\") pod \"keystone-6815-account-create-update-kvdfv\" (UID: \"8f0c6e8c-ce32-4db4-b903-d56ec1fec884\") " pod="openstack/keystone-6815-account-create-update-kvdfv" Nov 28 10:15:41 crc kubenswrapper[4838]: I1128 10:15:41.007328 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8f0c6e8c-ce32-4db4-b903-d56ec1fec884-operator-scripts\") pod \"keystone-6815-account-create-update-kvdfv\" (UID: \"8f0c6e8c-ce32-4db4-b903-d56ec1fec884\") " pod="openstack/keystone-6815-account-create-update-kvdfv" Nov 28 10:15:41 crc kubenswrapper[4838]: I1128 10:15:41.024584 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 28 10:15:41 crc kubenswrapper[4838]: I1128 10:15:41.029935 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p49p4\" (UniqueName: \"kubernetes.io/projected/8f0c6e8c-ce32-4db4-b903-d56ec1fec884-kube-api-access-p49p4\") pod \"keystone-6815-account-create-update-kvdfv\" (UID: \"8f0c6e8c-ce32-4db4-b903-d56ec1fec884\") " pod="openstack/keystone-6815-account-create-update-kvdfv" Nov 28 10:15:41 crc kubenswrapper[4838]: I1128 10:15:41.076836 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-6815-account-create-update-kvdfv" Nov 28 10:15:41 crc kubenswrapper[4838]: I1128 10:15:41.484709 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-ph2vv"] Nov 28 10:15:41 crc kubenswrapper[4838]: W1128 10:15:41.490527 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8769e7d0_1a95_40da_b0e7_91cbc81f94e1.slice/crio-00a28fa77d4641e329384658d50d8519b1d143c20782a2deb208adf3b3988a56 WatchSource:0}: Error finding container 00a28fa77d4641e329384658d50d8519b1d143c20782a2deb208adf3b3988a56: Status 404 returned error can't find the container with id 00a28fa77d4641e329384658d50d8519b1d143c20782a2deb208adf3b3988a56 Nov 28 10:15:41 crc kubenswrapper[4838]: I1128 10:15:41.546444 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6815-account-create-update-kvdfv"] Nov 28 10:15:41 crc kubenswrapper[4838]: W1128 10:15:41.554452 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8f0c6e8c_ce32_4db4_b903_d56ec1fec884.slice/crio-76736fac308536903397ead4412a9df4f9b527eefaf3756988c764072406b0c0 WatchSource:0}: Error finding container 76736fac308536903397ead4412a9df4f9b527eefaf3756988c764072406b0c0: Status 404 returned error can't find the container with id 76736fac308536903397ead4412a9df4f9b527eefaf3756988c764072406b0c0 Nov 28 10:15:41 crc kubenswrapper[4838]: I1128 10:15:41.602521 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-ph2vv" event={"ID":"8769e7d0-1a95-40da-b0e7-91cbc81f94e1","Type":"ContainerStarted","Data":"00a28fa77d4641e329384658d50d8519b1d143c20782a2deb208adf3b3988a56"} Nov 28 10:15:41 crc kubenswrapper[4838]: I1128 10:15:41.603498 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6815-account-create-update-kvdfv" event={"ID":"8f0c6e8c-ce32-4db4-b903-d56ec1fec884","Type":"ContainerStarted","Data":"76736fac308536903397ead4412a9df4f9b527eefaf3756988c764072406b0c0"} Nov 28 10:15:44 crc kubenswrapper[4838]: I1128 10:15:44.634289 4838 generic.go:334] "Generic (PLEG): container finished" podID="a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e" containerID="f1d4c74768a48ebc6c8fe4c28696b5b71356d149280aa1308c657b6fad5b6201" exitCode=0 Nov 28 10:15:44 crc kubenswrapper[4838]: I1128 10:15:44.635077 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-g727w" event={"ID":"a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e","Type":"ContainerDied","Data":"f1d4c74768a48ebc6c8fe4c28696b5b71356d149280aa1308c657b6fad5b6201"} Nov 28 10:15:44 crc kubenswrapper[4838]: I1128 10:15:44.639000 4838 generic.go:334] "Generic (PLEG): container finished" podID="b120d6f5-29b7-4dc8-9457-e908a8e4d6f0" containerID="ef7f17a99d5227a45089b42792eab52941ae18c70ff92518ecb0418c605f120d" exitCode=0 Nov 28 10:15:44 crc kubenswrapper[4838]: I1128 10:15:44.639200 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-a5d1-account-create-update-2m49r" event={"ID":"b120d6f5-29b7-4dc8-9457-e908a8e4d6f0","Type":"ContainerDied","Data":"ef7f17a99d5227a45089b42792eab52941ae18c70ff92518ecb0418c605f120d"} Nov 28 10:15:44 crc kubenswrapper[4838]: I1128 10:15:44.642054 4838 generic.go:334] "Generic (PLEG): container finished" podID="8769e7d0-1a95-40da-b0e7-91cbc81f94e1" containerID="310c9b59a557c1ea060de7a9365d8688f8ad07acd971b29c5645ad6803fc2a14" exitCode=0 Nov 28 10:15:44 
crc kubenswrapper[4838]: I1128 10:15:44.642179 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-ph2vv" event={"ID":"8769e7d0-1a95-40da-b0e7-91cbc81f94e1","Type":"ContainerDied","Data":"310c9b59a557c1ea060de7a9365d8688f8ad07acd971b29c5645ad6803fc2a14"} Nov 28 10:15:44 crc kubenswrapper[4838]: I1128 10:15:44.644502 4838 generic.go:334] "Generic (PLEG): container finished" podID="8f0c6e8c-ce32-4db4-b903-d56ec1fec884" containerID="4542a47699cd4c04eee312be65e9216e34f3f4a139fde038b0e9a5dc4cae7f1c" exitCode=0 Nov 28 10:15:44 crc kubenswrapper[4838]: I1128 10:15:44.644919 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6815-account-create-update-kvdfv" event={"ID":"8f0c6e8c-ce32-4db4-b903-d56ec1fec884","Type":"ContainerDied","Data":"4542a47699cd4c04eee312be65e9216e34f3f4a139fde038b0e9a5dc4cae7f1c"} Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.117885 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-ph2vv" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.213882 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-g727w" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.222882 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-a5d1-account-create-update-2m49r" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.235632 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6815-account-create-update-kvdfv" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.312825 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8769e7d0-1a95-40da-b0e7-91cbc81f94e1-operator-scripts\") pod \"8769e7d0-1a95-40da-b0e7-91cbc81f94e1\" (UID: \"8769e7d0-1a95-40da-b0e7-91cbc81f94e1\") " Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.312921 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2b9c7\" (UniqueName: \"kubernetes.io/projected/8769e7d0-1a95-40da-b0e7-91cbc81f94e1-kube-api-access-2b9c7\") pod \"8769e7d0-1a95-40da-b0e7-91cbc81f94e1\" (UID: \"8769e7d0-1a95-40da-b0e7-91cbc81f94e1\") " Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.313686 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8769e7d0-1a95-40da-b0e7-91cbc81f94e1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8769e7d0-1a95-40da-b0e7-91cbc81f94e1" (UID: "8769e7d0-1a95-40da-b0e7-91cbc81f94e1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.332938 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8769e7d0-1a95-40da-b0e7-91cbc81f94e1-kube-api-access-2b9c7" (OuterVolumeSpecName: "kube-api-access-2b9c7") pod "8769e7d0-1a95-40da-b0e7-91cbc81f94e1" (UID: "8769e7d0-1a95-40da-b0e7-91cbc81f94e1"). InnerVolumeSpecName "kube-api-access-2b9c7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.414495 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e-operator-scripts\") pod \"a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e\" (UID: \"a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e\") " Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.414574 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-987bx\" (UniqueName: \"kubernetes.io/projected/a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e-kube-api-access-987bx\") pod \"a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e\" (UID: \"a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e\") " Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.414763 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8f0c6e8c-ce32-4db4-b903-d56ec1fec884-operator-scripts\") pod \"8f0c6e8c-ce32-4db4-b903-d56ec1fec884\" (UID: \"8f0c6e8c-ce32-4db4-b903-d56ec1fec884\") " Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.414786 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p49p4\" (UniqueName: \"kubernetes.io/projected/8f0c6e8c-ce32-4db4-b903-d56ec1fec884-kube-api-access-p49p4\") pod \"8f0c6e8c-ce32-4db4-b903-d56ec1fec884\" (UID: \"8f0c6e8c-ce32-4db4-b903-d56ec1fec884\") " Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.414828 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b120d6f5-29b7-4dc8-9457-e908a8e4d6f0-operator-scripts\") pod \"b120d6f5-29b7-4dc8-9457-e908a8e4d6f0\" (UID: \"b120d6f5-29b7-4dc8-9457-e908a8e4d6f0\") " Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.414849 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lld9g\" (UniqueName: \"kubernetes.io/projected/b120d6f5-29b7-4dc8-9457-e908a8e4d6f0-kube-api-access-lld9g\") pod \"b120d6f5-29b7-4dc8-9457-e908a8e4d6f0\" (UID: \"b120d6f5-29b7-4dc8-9457-e908a8e4d6f0\") " Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.415481 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e" (UID: "a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.415478 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f0c6e8c-ce32-4db4-b903-d56ec1fec884-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8f0c6e8c-ce32-4db4-b903-d56ec1fec884" (UID: "8f0c6e8c-ce32-4db4-b903-d56ec1fec884"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.415486 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b120d6f5-29b7-4dc8-9457-e908a8e4d6f0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b120d6f5-29b7-4dc8-9457-e908a8e4d6f0" (UID: "b120d6f5-29b7-4dc8-9457-e908a8e4d6f0"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.415796 4838 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b120d6f5-29b7-4dc8-9457-e908a8e4d6f0-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.415818 4838 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.415830 4838 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8769e7d0-1a95-40da-b0e7-91cbc81f94e1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.415843 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2b9c7\" (UniqueName: \"kubernetes.io/projected/8769e7d0-1a95-40da-b0e7-91cbc81f94e1-kube-api-access-2b9c7\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.415856 4838 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8f0c6e8c-ce32-4db4-b903-d56ec1fec884-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.418547 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b120d6f5-29b7-4dc8-9457-e908a8e4d6f0-kube-api-access-lld9g" (OuterVolumeSpecName: "kube-api-access-lld9g") pod "b120d6f5-29b7-4dc8-9457-e908a8e4d6f0" (UID: "b120d6f5-29b7-4dc8-9457-e908a8e4d6f0"). InnerVolumeSpecName "kube-api-access-lld9g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.418902 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f0c6e8c-ce32-4db4-b903-d56ec1fec884-kube-api-access-p49p4" (OuterVolumeSpecName: "kube-api-access-p49p4") pod "8f0c6e8c-ce32-4db4-b903-d56ec1fec884" (UID: "8f0c6e8c-ce32-4db4-b903-d56ec1fec884"). InnerVolumeSpecName "kube-api-access-p49p4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.419331 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e-kube-api-access-987bx" (OuterVolumeSpecName: "kube-api-access-987bx") pod "a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e" (UID: "a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e"). InnerVolumeSpecName "kube-api-access-987bx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.516791 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-987bx\" (UniqueName: \"kubernetes.io/projected/a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e-kube-api-access-987bx\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.517262 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p49p4\" (UniqueName: \"kubernetes.io/projected/8f0c6e8c-ce32-4db4-b903-d56ec1fec884-kube-api-access-p49p4\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.517322 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lld9g\" (UniqueName: \"kubernetes.io/projected/b120d6f5-29b7-4dc8-9457-e908a8e4d6f0-kube-api-access-lld9g\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.668064 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-a5d1-account-create-update-2m49r" event={"ID":"b120d6f5-29b7-4dc8-9457-e908a8e4d6f0","Type":"ContainerDied","Data":"4bf2128f3ad15b6773d65a327d640c863021c7b8f24f3148d2ea40b9bbabb89a"} Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.668115 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-a5d1-account-create-update-2m49r" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.668128 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4bf2128f3ad15b6773d65a327d640c863021c7b8f24f3148d2ea40b9bbabb89a" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.670822 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-ph2vv" event={"ID":"8769e7d0-1a95-40da-b0e7-91cbc81f94e1","Type":"ContainerDied","Data":"00a28fa77d4641e329384658d50d8519b1d143c20782a2deb208adf3b3988a56"} Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.670860 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-ph2vv" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.670890 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="00a28fa77d4641e329384658d50d8519b1d143c20782a2deb208adf3b3988a56" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.676512 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6815-account-create-update-kvdfv" event={"ID":"8f0c6e8c-ce32-4db4-b903-d56ec1fec884","Type":"ContainerDied","Data":"76736fac308536903397ead4412a9df4f9b527eefaf3756988c764072406b0c0"} Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.676542 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="76736fac308536903397ead4412a9df4f9b527eefaf3756988c764072406b0c0" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.676621 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-6815-account-create-update-kvdfv" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.680005 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-g727w" event={"ID":"a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e","Type":"ContainerDied","Data":"4421d26ebf90ecd50526038ee0e61ee81b563dde489dbb0df8a494c9cc0ad70f"} Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.680032 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4421d26ebf90ecd50526038ee0e61ee81b563dde489dbb0df8a494c9cc0ad70f" Nov 28 10:15:46 crc kubenswrapper[4838]: I1128 10:15:46.680072 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-g727w" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.486343 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-98gmp"] Nov 28 10:15:51 crc kubenswrapper[4838]: E1128 10:15:51.486993 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e" containerName="mariadb-database-create" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.487009 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e" containerName="mariadb-database-create" Nov 28 10:15:51 crc kubenswrapper[4838]: E1128 10:15:51.487024 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f0c6e8c-ce32-4db4-b903-d56ec1fec884" containerName="mariadb-account-create-update" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.487033 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f0c6e8c-ce32-4db4-b903-d56ec1fec884" containerName="mariadb-account-create-update" Nov 28 10:15:51 crc kubenswrapper[4838]: E1128 10:15:51.487054 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b120d6f5-29b7-4dc8-9457-e908a8e4d6f0" containerName="mariadb-account-create-update" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.487062 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="b120d6f5-29b7-4dc8-9457-e908a8e4d6f0" containerName="mariadb-account-create-update" Nov 28 10:15:51 crc kubenswrapper[4838]: E1128 10:15:51.487072 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8769e7d0-1a95-40da-b0e7-91cbc81f94e1" containerName="mariadb-database-create" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.487083 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="8769e7d0-1a95-40da-b0e7-91cbc81f94e1" containerName="mariadb-database-create" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.487267 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e" containerName="mariadb-database-create" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.487288 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f0c6e8c-ce32-4db4-b903-d56ec1fec884" containerName="mariadb-account-create-update" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.487299 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="8769e7d0-1a95-40da-b0e7-91cbc81f94e1" containerName="mariadb-database-create" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.487320 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="b120d6f5-29b7-4dc8-9457-e908a8e4d6f0" containerName="mariadb-account-create-update" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.487933 4838 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-98gmp" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.489525 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-d7h7r" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.490395 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.506521 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b46f0d10-b6df-4e90-aae8-b903a29d6898-combined-ca-bundle\") pod \"glance-db-sync-98gmp\" (UID: \"b46f0d10-b6df-4e90-aae8-b903a29d6898\") " pod="openstack/glance-db-sync-98gmp" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.506602 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b46f0d10-b6df-4e90-aae8-b903a29d6898-db-sync-config-data\") pod \"glance-db-sync-98gmp\" (UID: \"b46f0d10-b6df-4e90-aae8-b903a29d6898\") " pod="openstack/glance-db-sync-98gmp" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.506666 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jl6qh\" (UniqueName: \"kubernetes.io/projected/b46f0d10-b6df-4e90-aae8-b903a29d6898-kube-api-access-jl6qh\") pod \"glance-db-sync-98gmp\" (UID: \"b46f0d10-b6df-4e90-aae8-b903a29d6898\") " pod="openstack/glance-db-sync-98gmp" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.506786 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b46f0d10-b6df-4e90-aae8-b903a29d6898-config-data\") pod \"glance-db-sync-98gmp\" (UID: \"b46f0d10-b6df-4e90-aae8-b903a29d6898\") " pod="openstack/glance-db-sync-98gmp" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.512236 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-98gmp"] Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.608346 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b46f0d10-b6df-4e90-aae8-b903a29d6898-config-data\") pod \"glance-db-sync-98gmp\" (UID: \"b46f0d10-b6df-4e90-aae8-b903a29d6898\") " pod="openstack/glance-db-sync-98gmp" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.608519 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b46f0d10-b6df-4e90-aae8-b903a29d6898-combined-ca-bundle\") pod \"glance-db-sync-98gmp\" (UID: \"b46f0d10-b6df-4e90-aae8-b903a29d6898\") " pod="openstack/glance-db-sync-98gmp" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.608558 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b46f0d10-b6df-4e90-aae8-b903a29d6898-db-sync-config-data\") pod \"glance-db-sync-98gmp\" (UID: \"b46f0d10-b6df-4e90-aae8-b903a29d6898\") " pod="openstack/glance-db-sync-98gmp" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.608602 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jl6qh\" (UniqueName: 
\"kubernetes.io/projected/b46f0d10-b6df-4e90-aae8-b903a29d6898-kube-api-access-jl6qh\") pod \"glance-db-sync-98gmp\" (UID: \"b46f0d10-b6df-4e90-aae8-b903a29d6898\") " pod="openstack/glance-db-sync-98gmp" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.614944 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b46f0d10-b6df-4e90-aae8-b903a29d6898-db-sync-config-data\") pod \"glance-db-sync-98gmp\" (UID: \"b46f0d10-b6df-4e90-aae8-b903a29d6898\") " pod="openstack/glance-db-sync-98gmp" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.615652 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b46f0d10-b6df-4e90-aae8-b903a29d6898-combined-ca-bundle\") pod \"glance-db-sync-98gmp\" (UID: \"b46f0d10-b6df-4e90-aae8-b903a29d6898\") " pod="openstack/glance-db-sync-98gmp" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.620307 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b46f0d10-b6df-4e90-aae8-b903a29d6898-config-data\") pod \"glance-db-sync-98gmp\" (UID: \"b46f0d10-b6df-4e90-aae8-b903a29d6898\") " pod="openstack/glance-db-sync-98gmp" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.629449 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jl6qh\" (UniqueName: \"kubernetes.io/projected/b46f0d10-b6df-4e90-aae8-b903a29d6898-kube-api-access-jl6qh\") pod \"glance-db-sync-98gmp\" (UID: \"b46f0d10-b6df-4e90-aae8-b903a29d6898\") " pod="openstack/glance-db-sync-98gmp" Nov 28 10:15:51 crc kubenswrapper[4838]: I1128 10:15:51.807671 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-98gmp" Nov 28 10:15:52 crc kubenswrapper[4838]: I1128 10:15:52.330147 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-98gmp"] Nov 28 10:15:52 crc kubenswrapper[4838]: I1128 10:15:52.498366 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:15:52 crc kubenswrapper[4838]: I1128 10:15:52.550339 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-dqjd8" podUID="50cdff0a-cfe5-41e1-8eed-67b23079335f" containerName="ovn-controller" probeResult="failure" output=< Nov 28 10:15:52 crc kubenswrapper[4838]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 28 10:15:52 crc kubenswrapper[4838]: > Nov 28 10:15:52 crc kubenswrapper[4838]: I1128 10:15:52.552687 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-zt4fz" Nov 28 10:15:52 crc kubenswrapper[4838]: I1128 10:15:52.766914 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-dqjd8-config-j2slz"] Nov 28 10:15:52 crc kubenswrapper[4838]: I1128 10:15:52.768361 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-dqjd8-config-j2slz" Nov 28 10:15:52 crc kubenswrapper[4838]: I1128 10:15:52.771048 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 28 10:15:52 crc kubenswrapper[4838]: I1128 10:15:52.789997 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-dqjd8-config-j2slz"] Nov 28 10:15:52 crc kubenswrapper[4838]: I1128 10:15:52.798979 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-98gmp" event={"ID":"b46f0d10-b6df-4e90-aae8-b903a29d6898","Type":"ContainerStarted","Data":"a34088ee791dedba57d0708018576e07f82f75ab6e6189a85773e30802f941be"} Nov 28 10:15:52 crc kubenswrapper[4838]: I1128 10:15:52.954406 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/51cc9f8c-d9d9-4829-a0df-befd42f5898d-additional-scripts\") pod \"ovn-controller-dqjd8-config-j2slz\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " pod="openstack/ovn-controller-dqjd8-config-j2slz" Nov 28 10:15:52 crc kubenswrapper[4838]: I1128 10:15:52.954488 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkzms\" (UniqueName: \"kubernetes.io/projected/51cc9f8c-d9d9-4829-a0df-befd42f5898d-kube-api-access-fkzms\") pod \"ovn-controller-dqjd8-config-j2slz\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " pod="openstack/ovn-controller-dqjd8-config-j2slz" Nov 28 10:15:52 crc kubenswrapper[4838]: I1128 10:15:52.954515 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/51cc9f8c-d9d9-4829-a0df-befd42f5898d-var-log-ovn\") pod \"ovn-controller-dqjd8-config-j2slz\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " pod="openstack/ovn-controller-dqjd8-config-j2slz" Nov 28 10:15:52 crc kubenswrapper[4838]: I1128 10:15:52.954559 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/51cc9f8c-d9d9-4829-a0df-befd42f5898d-var-run-ovn\") pod \"ovn-controller-dqjd8-config-j2slz\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " pod="openstack/ovn-controller-dqjd8-config-j2slz" Nov 28 10:15:52 crc kubenswrapper[4838]: I1128 10:15:52.954589 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/51cc9f8c-d9d9-4829-a0df-befd42f5898d-var-run\") pod \"ovn-controller-dqjd8-config-j2slz\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " pod="openstack/ovn-controller-dqjd8-config-j2slz" Nov 28 10:15:52 crc kubenswrapper[4838]: I1128 10:15:52.954649 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/51cc9f8c-d9d9-4829-a0df-befd42f5898d-scripts\") pod \"ovn-controller-dqjd8-config-j2slz\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " pod="openstack/ovn-controller-dqjd8-config-j2slz" Nov 28 10:15:53 crc kubenswrapper[4838]: I1128 10:15:53.056494 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkzms\" (UniqueName: \"kubernetes.io/projected/51cc9f8c-d9d9-4829-a0df-befd42f5898d-kube-api-access-fkzms\") pod \"ovn-controller-dqjd8-config-j2slz\" (UID: 
\"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " pod="openstack/ovn-controller-dqjd8-config-j2slz" Nov 28 10:15:53 crc kubenswrapper[4838]: I1128 10:15:53.056544 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/51cc9f8c-d9d9-4829-a0df-befd42f5898d-var-log-ovn\") pod \"ovn-controller-dqjd8-config-j2slz\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " pod="openstack/ovn-controller-dqjd8-config-j2slz" Nov 28 10:15:53 crc kubenswrapper[4838]: I1128 10:15:53.056583 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/51cc9f8c-d9d9-4829-a0df-befd42f5898d-var-run-ovn\") pod \"ovn-controller-dqjd8-config-j2slz\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " pod="openstack/ovn-controller-dqjd8-config-j2slz" Nov 28 10:15:53 crc kubenswrapper[4838]: I1128 10:15:53.056608 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/51cc9f8c-d9d9-4829-a0df-befd42f5898d-var-run\") pod \"ovn-controller-dqjd8-config-j2slz\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " pod="openstack/ovn-controller-dqjd8-config-j2slz" Nov 28 10:15:53 crc kubenswrapper[4838]: I1128 10:15:53.056658 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/51cc9f8c-d9d9-4829-a0df-befd42f5898d-scripts\") pod \"ovn-controller-dqjd8-config-j2slz\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " pod="openstack/ovn-controller-dqjd8-config-j2slz" Nov 28 10:15:53 crc kubenswrapper[4838]: I1128 10:15:53.056697 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/51cc9f8c-d9d9-4829-a0df-befd42f5898d-additional-scripts\") pod \"ovn-controller-dqjd8-config-j2slz\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " pod="openstack/ovn-controller-dqjd8-config-j2slz" Nov 28 10:15:53 crc kubenswrapper[4838]: I1128 10:15:53.056920 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/51cc9f8c-d9d9-4829-a0df-befd42f5898d-var-run-ovn\") pod \"ovn-controller-dqjd8-config-j2slz\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " pod="openstack/ovn-controller-dqjd8-config-j2slz" Nov 28 10:15:53 crc kubenswrapper[4838]: I1128 10:15:53.057015 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/51cc9f8c-d9d9-4829-a0df-befd42f5898d-var-log-ovn\") pod \"ovn-controller-dqjd8-config-j2slz\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " pod="openstack/ovn-controller-dqjd8-config-j2slz" Nov 28 10:15:53 crc kubenswrapper[4838]: I1128 10:15:53.057057 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/51cc9f8c-d9d9-4829-a0df-befd42f5898d-var-run\") pod \"ovn-controller-dqjd8-config-j2slz\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " pod="openstack/ovn-controller-dqjd8-config-j2slz" Nov 28 10:15:53 crc kubenswrapper[4838]: I1128 10:15:53.057535 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/51cc9f8c-d9d9-4829-a0df-befd42f5898d-additional-scripts\") pod \"ovn-controller-dqjd8-config-j2slz\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " 
pod="openstack/ovn-controller-dqjd8-config-j2slz" Nov 28 10:15:53 crc kubenswrapper[4838]: I1128 10:15:53.061170 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/51cc9f8c-d9d9-4829-a0df-befd42f5898d-scripts\") pod \"ovn-controller-dqjd8-config-j2slz\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " pod="openstack/ovn-controller-dqjd8-config-j2slz" Nov 28 10:15:53 crc kubenswrapper[4838]: I1128 10:15:53.080032 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fkzms\" (UniqueName: \"kubernetes.io/projected/51cc9f8c-d9d9-4829-a0df-befd42f5898d-kube-api-access-fkzms\") pod \"ovn-controller-dqjd8-config-j2slz\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " pod="openstack/ovn-controller-dqjd8-config-j2slz" Nov 28 10:15:53 crc kubenswrapper[4838]: I1128 10:15:53.089185 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-dqjd8-config-j2slz" Nov 28 10:15:53 crc kubenswrapper[4838]: I1128 10:15:53.608830 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-dqjd8-config-j2slz"] Nov 28 10:15:53 crc kubenswrapper[4838]: W1128 10:15:53.610904 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51cc9f8c_d9d9_4829_a0df_befd42f5898d.slice/crio-5218f23ddffd9423120b53b93f33a951081b698883cccd0db26241aca298dac0 WatchSource:0}: Error finding container 5218f23ddffd9423120b53b93f33a951081b698883cccd0db26241aca298dac0: Status 404 returned error can't find the container with id 5218f23ddffd9423120b53b93f33a951081b698883cccd0db26241aca298dac0 Nov 28 10:15:53 crc kubenswrapper[4838]: I1128 10:15:53.808270 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-dqjd8-config-j2slz" event={"ID":"51cc9f8c-d9d9-4829-a0df-befd42f5898d","Type":"ContainerStarted","Data":"5218f23ddffd9423120b53b93f33a951081b698883cccd0db26241aca298dac0"} Nov 28 10:15:54 crc kubenswrapper[4838]: I1128 10:15:54.817644 4838 generic.go:334] "Generic (PLEG): container finished" podID="51cc9f8c-d9d9-4829-a0df-befd42f5898d" containerID="69d421e40045fc218969791f368c7f037bec542d7f7d42c32a098a332a595239" exitCode=0 Nov 28 10:15:54 crc kubenswrapper[4838]: I1128 10:15:54.817801 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-dqjd8-config-j2slz" event={"ID":"51cc9f8c-d9d9-4829-a0df-befd42f5898d","Type":"ContainerDied","Data":"69d421e40045fc218969791f368c7f037bec542d7f7d42c32a098a332a595239"} Nov 28 10:15:56 crc kubenswrapper[4838]: I1128 10:15:56.171259 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-dqjd8-config-j2slz" Nov 28 10:15:56 crc kubenswrapper[4838]: I1128 10:15:56.311841 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/51cc9f8c-d9d9-4829-a0df-befd42f5898d-scripts\") pod \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " Nov 28 10:15:56 crc kubenswrapper[4838]: I1128 10:15:56.311926 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/51cc9f8c-d9d9-4829-a0df-befd42f5898d-var-log-ovn\") pod \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " Nov 28 10:15:56 crc kubenswrapper[4838]: I1128 10:15:56.311997 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/51cc9f8c-d9d9-4829-a0df-befd42f5898d-var-run-ovn\") pod \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " Nov 28 10:15:56 crc kubenswrapper[4838]: I1128 10:15:56.312048 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/51cc9f8c-d9d9-4829-a0df-befd42f5898d-additional-scripts\") pod \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " Nov 28 10:15:56 crc kubenswrapper[4838]: I1128 10:15:56.312073 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/51cc9f8c-d9d9-4829-a0df-befd42f5898d-var-run\") pod \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " Nov 28 10:15:56 crc kubenswrapper[4838]: I1128 10:15:56.312081 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/51cc9f8c-d9d9-4829-a0df-befd42f5898d-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "51cc9f8c-d9d9-4829-a0df-befd42f5898d" (UID: "51cc9f8c-d9d9-4829-a0df-befd42f5898d"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:15:56 crc kubenswrapper[4838]: I1128 10:15:56.312119 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/51cc9f8c-d9d9-4829-a0df-befd42f5898d-var-run" (OuterVolumeSpecName: "var-run") pod "51cc9f8c-d9d9-4829-a0df-befd42f5898d" (UID: "51cc9f8c-d9d9-4829-a0df-befd42f5898d"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:15:56 crc kubenswrapper[4838]: I1128 10:15:56.312086 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/51cc9f8c-d9d9-4829-a0df-befd42f5898d-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "51cc9f8c-d9d9-4829-a0df-befd42f5898d" (UID: "51cc9f8c-d9d9-4829-a0df-befd42f5898d"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:15:56 crc kubenswrapper[4838]: I1128 10:15:56.312100 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fkzms\" (UniqueName: \"kubernetes.io/projected/51cc9f8c-d9d9-4829-a0df-befd42f5898d-kube-api-access-fkzms\") pod \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\" (UID: \"51cc9f8c-d9d9-4829-a0df-befd42f5898d\") " Nov 28 10:15:56 crc kubenswrapper[4838]: I1128 10:15:56.312505 4838 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/51cc9f8c-d9d9-4829-a0df-befd42f5898d-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:56 crc kubenswrapper[4838]: I1128 10:15:56.312523 4838 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/51cc9f8c-d9d9-4829-a0df-befd42f5898d-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:56 crc kubenswrapper[4838]: I1128 10:15:56.312532 4838 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/51cc9f8c-d9d9-4829-a0df-befd42f5898d-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:56 crc kubenswrapper[4838]: I1128 10:15:56.312840 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51cc9f8c-d9d9-4829-a0df-befd42f5898d-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "51cc9f8c-d9d9-4829-a0df-befd42f5898d" (UID: "51cc9f8c-d9d9-4829-a0df-befd42f5898d"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:15:56 crc kubenswrapper[4838]: I1128 10:15:56.313089 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51cc9f8c-d9d9-4829-a0df-befd42f5898d-scripts" (OuterVolumeSpecName: "scripts") pod "51cc9f8c-d9d9-4829-a0df-befd42f5898d" (UID: "51cc9f8c-d9d9-4829-a0df-befd42f5898d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:15:56 crc kubenswrapper[4838]: I1128 10:15:56.319987 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51cc9f8c-d9d9-4829-a0df-befd42f5898d-kube-api-access-fkzms" (OuterVolumeSpecName: "kube-api-access-fkzms") pod "51cc9f8c-d9d9-4829-a0df-befd42f5898d" (UID: "51cc9f8c-d9d9-4829-a0df-befd42f5898d"). InnerVolumeSpecName "kube-api-access-fkzms". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:15:56 crc kubenswrapper[4838]: I1128 10:15:56.414281 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/51cc9f8c-d9d9-4829-a0df-befd42f5898d-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:56 crc kubenswrapper[4838]: I1128 10:15:56.414314 4838 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/51cc9f8c-d9d9-4829-a0df-befd42f5898d-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:56 crc kubenswrapper[4838]: I1128 10:15:56.414327 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fkzms\" (UniqueName: \"kubernetes.io/projected/51cc9f8c-d9d9-4829-a0df-befd42f5898d-kube-api-access-fkzms\") on node \"crc\" DevicePath \"\"" Nov 28 10:15:56 crc kubenswrapper[4838]: I1128 10:15:56.834012 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-dqjd8-config-j2slz" event={"ID":"51cc9f8c-d9d9-4829-a0df-befd42f5898d","Type":"ContainerDied","Data":"5218f23ddffd9423120b53b93f33a951081b698883cccd0db26241aca298dac0"} Nov 28 10:15:56 crc kubenswrapper[4838]: I1128 10:15:56.834048 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5218f23ddffd9423120b53b93f33a951081b698883cccd0db26241aca298dac0" Nov 28 10:15:56 crc kubenswrapper[4838]: I1128 10:15:56.834079 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-dqjd8-config-j2slz" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.282704 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-dqjd8-config-j2slz"] Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.294405 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-dqjd8-config-j2slz"] Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.405996 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-dqjd8-config-gpfdd"] Nov 28 10:15:57 crc kubenswrapper[4838]: E1128 10:15:57.406347 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51cc9f8c-d9d9-4829-a0df-befd42f5898d" containerName="ovn-config" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.406359 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="51cc9f8c-d9d9-4829-a0df-befd42f5898d" containerName="ovn-config" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.406517 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="51cc9f8c-d9d9-4829-a0df-befd42f5898d" containerName="ovn-config" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.407027 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-dqjd8-config-gpfdd" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.414763 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.416969 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-dqjd8-config-gpfdd"] Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.437799 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-var-log-ovn\") pod \"ovn-controller-dqjd8-config-gpfdd\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " pod="openstack/ovn-controller-dqjd8-config-gpfdd" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.437934 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-var-run-ovn\") pod \"ovn-controller-dqjd8-config-gpfdd\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " pod="openstack/ovn-controller-dqjd8-config-gpfdd" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.437966 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-662h4\" (UniqueName: \"kubernetes.io/projected/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-kube-api-access-662h4\") pod \"ovn-controller-dqjd8-config-gpfdd\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " pod="openstack/ovn-controller-dqjd8-config-gpfdd" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.438009 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-var-run\") pod \"ovn-controller-dqjd8-config-gpfdd\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " pod="openstack/ovn-controller-dqjd8-config-gpfdd" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.438036 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-additional-scripts\") pod \"ovn-controller-dqjd8-config-gpfdd\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " pod="openstack/ovn-controller-dqjd8-config-gpfdd" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.438081 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-scripts\") pod \"ovn-controller-dqjd8-config-gpfdd\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " pod="openstack/ovn-controller-dqjd8-config-gpfdd" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.486127 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-dqjd8" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.539390 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-scripts\") pod \"ovn-controller-dqjd8-config-gpfdd\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " pod="openstack/ovn-controller-dqjd8-config-gpfdd" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.539800 4838 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-var-log-ovn\") pod \"ovn-controller-dqjd8-config-gpfdd\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " pod="openstack/ovn-controller-dqjd8-config-gpfdd" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.539951 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-var-run-ovn\") pod \"ovn-controller-dqjd8-config-gpfdd\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " pod="openstack/ovn-controller-dqjd8-config-gpfdd" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.539983 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-662h4\" (UniqueName: \"kubernetes.io/projected/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-kube-api-access-662h4\") pod \"ovn-controller-dqjd8-config-gpfdd\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " pod="openstack/ovn-controller-dqjd8-config-gpfdd" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.540016 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-var-run\") pod \"ovn-controller-dqjd8-config-gpfdd\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " pod="openstack/ovn-controller-dqjd8-config-gpfdd" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.540043 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-additional-scripts\") pod \"ovn-controller-dqjd8-config-gpfdd\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " pod="openstack/ovn-controller-dqjd8-config-gpfdd" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.540924 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-additional-scripts\") pod \"ovn-controller-dqjd8-config-gpfdd\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " pod="openstack/ovn-controller-dqjd8-config-gpfdd" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.543152 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-var-run-ovn\") pod \"ovn-controller-dqjd8-config-gpfdd\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " pod="openstack/ovn-controller-dqjd8-config-gpfdd" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.543158 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-var-log-ovn\") pod \"ovn-controller-dqjd8-config-gpfdd\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " pod="openstack/ovn-controller-dqjd8-config-gpfdd" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.543242 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-var-run\") pod \"ovn-controller-dqjd8-config-gpfdd\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " pod="openstack/ovn-controller-dqjd8-config-gpfdd" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.544932 4838 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-scripts\") pod \"ovn-controller-dqjd8-config-gpfdd\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " pod="openstack/ovn-controller-dqjd8-config-gpfdd" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.558890 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-662h4\" (UniqueName: \"kubernetes.io/projected/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-kube-api-access-662h4\") pod \"ovn-controller-dqjd8-config-gpfdd\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " pod="openstack/ovn-controller-dqjd8-config-gpfdd" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.729057 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-dqjd8-config-gpfdd" Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.848995 4838 generic.go:334] "Generic (PLEG): container finished" podID="366c721a-0e79-44a0-aa02-761c4ddc6936" containerID="c72266b9b0f0228a2212b582e2d53a73166e50af00c9674d71d6a8b1fbb59a1d" exitCode=0 Nov 28 10:15:57 crc kubenswrapper[4838]: I1128 10:15:57.849040 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"366c721a-0e79-44a0-aa02-761c4ddc6936","Type":"ContainerDied","Data":"c72266b9b0f0228a2212b582e2d53a73166e50af00c9674d71d6a8b1fbb59a1d"} Nov 28 10:15:58 crc kubenswrapper[4838]: I1128 10:15:58.576326 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51cc9f8c-d9d9-4829-a0df-befd42f5898d" path="/var/lib/kubelet/pods/51cc9f8c-d9d9-4829-a0df-befd42f5898d/volumes" Nov 28 10:15:58 crc kubenswrapper[4838]: I1128 10:15:58.860814 4838 generic.go:334] "Generic (PLEG): container finished" podID="dbe39b78-c198-480e-9bca-17eaed6183bf" containerID="4556fc74aa9f704938030bc686cf395afad6eac12531f3c48a0fbaeeaf2d8910" exitCode=0 Nov 28 10:15:58 crc kubenswrapper[4838]: I1128 10:15:58.860865 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"dbe39b78-c198-480e-9bca-17eaed6183bf","Type":"ContainerDied","Data":"4556fc74aa9f704938030bc686cf395afad6eac12531f3c48a0fbaeeaf2d8910"} Nov 28 10:16:02 crc kubenswrapper[4838]: I1128 10:16:02.982931 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-dqjd8-config-gpfdd"] Nov 28 10:16:02 crc kubenswrapper[4838]: W1128 10:16:02.996681 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1eb1068d_2c78_4baa_9bb3_d21a9181e5db.slice/crio-396fd3d70fa190da8b8e067cb1c7b6be5089a8e886dcfaa4c3e66ce3f295e206 WatchSource:0}: Error finding container 396fd3d70fa190da8b8e067cb1c7b6be5089a8e886dcfaa4c3e66ce3f295e206: Status 404 returned error can't find the container with id 396fd3d70fa190da8b8e067cb1c7b6be5089a8e886dcfaa4c3e66ce3f295e206 Nov 28 10:16:03 crc kubenswrapper[4838]: I1128 10:16:03.900869 4838 generic.go:334] "Generic (PLEG): container finished" podID="1eb1068d-2c78-4baa-9bb3-d21a9181e5db" containerID="70561ffe7582eb001bea6579c416ab59dfacdd89d833a820e8066ba416af0b06" exitCode=0 Nov 28 10:16:03 crc kubenswrapper[4838]: I1128 10:16:03.900924 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-dqjd8-config-gpfdd" 
event={"ID":"1eb1068d-2c78-4baa-9bb3-d21a9181e5db","Type":"ContainerDied","Data":"70561ffe7582eb001bea6579c416ab59dfacdd89d833a820e8066ba416af0b06"} Nov 28 10:16:03 crc kubenswrapper[4838]: I1128 10:16:03.901925 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-dqjd8-config-gpfdd" event={"ID":"1eb1068d-2c78-4baa-9bb3-d21a9181e5db","Type":"ContainerStarted","Data":"396fd3d70fa190da8b8e067cb1c7b6be5089a8e886dcfaa4c3e66ce3f295e206"} Nov 28 10:16:03 crc kubenswrapper[4838]: I1128 10:16:03.904535 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-98gmp" event={"ID":"b46f0d10-b6df-4e90-aae8-b903a29d6898","Type":"ContainerStarted","Data":"1eab55f2714789723b2aa474258fd66c62df590e0987ecf2ef5eec85f115aba6"} Nov 28 10:16:03 crc kubenswrapper[4838]: I1128 10:16:03.906624 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"dbe39b78-c198-480e-9bca-17eaed6183bf","Type":"ContainerStarted","Data":"14999f81bcbe0f31824a6a12d3130d4e3af12b8cc9f8f6f76e3d05268e066714"} Nov 28 10:16:03 crc kubenswrapper[4838]: I1128 10:16:03.906864 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:16:03 crc kubenswrapper[4838]: I1128 10:16:03.908519 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"366c721a-0e79-44a0-aa02-761c4ddc6936","Type":"ContainerStarted","Data":"4e5b05230586f5bd5d31052d153bdc3687b15e03fd9d5613b97b7fe519435dba"} Nov 28 10:16:03 crc kubenswrapper[4838]: I1128 10:16:03.908789 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 28 10:16:03 crc kubenswrapper[4838]: I1128 10:16:03.948939 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=42.38812815 podStartE2EDuration="1m17.948911144s" podCreationTimestamp="2025-11-28 10:14:46 +0000 UTC" firstStartedPulling="2025-11-28 10:14:48.567144994 +0000 UTC m=+1060.266119164" lastFinishedPulling="2025-11-28 10:15:24.127927948 +0000 UTC m=+1095.826902158" observedRunningTime="2025-11-28 10:16:03.944346162 +0000 UTC m=+1135.643320332" watchObservedRunningTime="2025-11-28 10:16:03.948911144 +0000 UTC m=+1135.647885314" Nov 28 10:16:03 crc kubenswrapper[4838]: I1128 10:16:03.962269 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-98gmp" podStartSLOduration=2.489325891 podStartE2EDuration="12.962251962s" podCreationTimestamp="2025-11-28 10:15:51 +0000 UTC" firstStartedPulling="2025-11-28 10:15:52.334063173 +0000 UTC m=+1124.033037353" lastFinishedPulling="2025-11-28 10:16:02.806989264 +0000 UTC m=+1134.505963424" observedRunningTime="2025-11-28 10:16:03.958213963 +0000 UTC m=+1135.657188133" watchObservedRunningTime="2025-11-28 10:16:03.962251962 +0000 UTC m=+1135.661226132" Nov 28 10:16:03 crc kubenswrapper[4838]: I1128 10:16:03.983432 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=-9223371958.871365 podStartE2EDuration="1m17.983411957s" podCreationTimestamp="2025-11-28 10:14:46 +0000 UTC" firstStartedPulling="2025-11-28 10:14:48.192673743 +0000 UTC m=+1059.891647913" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:16:03.983010127 +0000 UTC m=+1135.681984297" watchObservedRunningTime="2025-11-28 10:16:03.983411957 +0000 UTC 
m=+1135.682386127" Nov 28 10:16:05 crc kubenswrapper[4838]: I1128 10:16:05.353695 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-dqjd8-config-gpfdd" Nov 28 10:16:05 crc kubenswrapper[4838]: I1128 10:16:05.486706 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-additional-scripts\") pod \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " Nov 28 10:16:05 crc kubenswrapper[4838]: I1128 10:16:05.486862 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-var-run\") pod \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " Nov 28 10:16:05 crc kubenswrapper[4838]: I1128 10:16:05.486980 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-var-run-ovn\") pod \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " Nov 28 10:16:05 crc kubenswrapper[4838]: I1128 10:16:05.487062 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-scripts\") pod \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " Nov 28 10:16:05 crc kubenswrapper[4838]: I1128 10:16:05.487063 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-var-run" (OuterVolumeSpecName: "var-run") pod "1eb1068d-2c78-4baa-9bb3-d21a9181e5db" (UID: "1eb1068d-2c78-4baa-9bb3-d21a9181e5db"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:16:05 crc kubenswrapper[4838]: I1128 10:16:05.487106 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-662h4\" (UniqueName: \"kubernetes.io/projected/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-kube-api-access-662h4\") pod \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " Nov 28 10:16:05 crc kubenswrapper[4838]: I1128 10:16:05.487167 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-var-log-ovn\") pod \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\" (UID: \"1eb1068d-2c78-4baa-9bb3-d21a9181e5db\") " Nov 28 10:16:05 crc kubenswrapper[4838]: I1128 10:16:05.487546 4838 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:05 crc kubenswrapper[4838]: I1128 10:16:05.487108 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "1eb1068d-2c78-4baa-9bb3-d21a9181e5db" (UID: "1eb1068d-2c78-4baa-9bb3-d21a9181e5db"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:16:05 crc kubenswrapper[4838]: I1128 10:16:05.487513 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "1eb1068d-2c78-4baa-9bb3-d21a9181e5db" (UID: "1eb1068d-2c78-4baa-9bb3-d21a9181e5db"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:16:05 crc kubenswrapper[4838]: I1128 10:16:05.487609 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "1eb1068d-2c78-4baa-9bb3-d21a9181e5db" (UID: "1eb1068d-2c78-4baa-9bb3-d21a9181e5db"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:16:05 crc kubenswrapper[4838]: I1128 10:16:05.487701 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-scripts" (OuterVolumeSpecName: "scripts") pod "1eb1068d-2c78-4baa-9bb3-d21a9181e5db" (UID: "1eb1068d-2c78-4baa-9bb3-d21a9181e5db"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:16:05 crc kubenswrapper[4838]: I1128 10:16:05.492557 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-kube-api-access-662h4" (OuterVolumeSpecName: "kube-api-access-662h4") pod "1eb1068d-2c78-4baa-9bb3-d21a9181e5db" (UID: "1eb1068d-2c78-4baa-9bb3-d21a9181e5db"). InnerVolumeSpecName "kube-api-access-662h4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:16:05 crc kubenswrapper[4838]: I1128 10:16:05.588511 4838 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:05 crc kubenswrapper[4838]: I1128 10:16:05.588545 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:05 crc kubenswrapper[4838]: I1128 10:16:05.588556 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-662h4\" (UniqueName: \"kubernetes.io/projected/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-kube-api-access-662h4\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:05 crc kubenswrapper[4838]: I1128 10:16:05.588565 4838 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:05 crc kubenswrapper[4838]: I1128 10:16:05.588573 4838 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1eb1068d-2c78-4baa-9bb3-d21a9181e5db-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:05 crc kubenswrapper[4838]: I1128 10:16:05.938546 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-dqjd8-config-gpfdd" event={"ID":"1eb1068d-2c78-4baa-9bb3-d21a9181e5db","Type":"ContainerDied","Data":"396fd3d70fa190da8b8e067cb1c7b6be5089a8e886dcfaa4c3e66ce3f295e206"} Nov 28 10:16:05 crc kubenswrapper[4838]: I1128 10:16:05.938581 4838 
pod_container_deletor.go:80] "Container not found in pod's containers" containerID="396fd3d70fa190da8b8e067cb1c7b6be5089a8e886dcfaa4c3e66ce3f295e206" Nov 28 10:16:05 crc kubenswrapper[4838]: I1128 10:16:05.938678 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-dqjd8-config-gpfdd" Nov 28 10:16:06 crc kubenswrapper[4838]: I1128 10:16:06.439489 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-dqjd8-config-gpfdd"] Nov 28 10:16:06 crc kubenswrapper[4838]: I1128 10:16:06.457959 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-dqjd8-config-gpfdd"] Nov 28 10:16:06 crc kubenswrapper[4838]: I1128 10:16:06.573506 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1eb1068d-2c78-4baa-9bb3-d21a9181e5db" path="/var/lib/kubelet/pods/1eb1068d-2c78-4baa-9bb3-d21a9181e5db/volumes" Nov 28 10:16:09 crc kubenswrapper[4838]: I1128 10:16:09.979035 4838 generic.go:334] "Generic (PLEG): container finished" podID="b46f0d10-b6df-4e90-aae8-b903a29d6898" containerID="1eab55f2714789723b2aa474258fd66c62df590e0987ecf2ef5eec85f115aba6" exitCode=0 Nov 28 10:16:09 crc kubenswrapper[4838]: I1128 10:16:09.979161 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-98gmp" event={"ID":"b46f0d10-b6df-4e90-aae8-b903a29d6898","Type":"ContainerDied","Data":"1eab55f2714789723b2aa474258fd66c62df590e0987ecf2ef5eec85f115aba6"} Nov 28 10:16:11 crc kubenswrapper[4838]: I1128 10:16:11.376777 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-98gmp" Nov 28 10:16:11 crc kubenswrapper[4838]: I1128 10:16:11.488617 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b46f0d10-b6df-4e90-aae8-b903a29d6898-config-data\") pod \"b46f0d10-b6df-4e90-aae8-b903a29d6898\" (UID: \"b46f0d10-b6df-4e90-aae8-b903a29d6898\") " Nov 28 10:16:11 crc kubenswrapper[4838]: I1128 10:16:11.488883 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b46f0d10-b6df-4e90-aae8-b903a29d6898-db-sync-config-data\") pod \"b46f0d10-b6df-4e90-aae8-b903a29d6898\" (UID: \"b46f0d10-b6df-4e90-aae8-b903a29d6898\") " Nov 28 10:16:11 crc kubenswrapper[4838]: I1128 10:16:11.488930 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jl6qh\" (UniqueName: \"kubernetes.io/projected/b46f0d10-b6df-4e90-aae8-b903a29d6898-kube-api-access-jl6qh\") pod \"b46f0d10-b6df-4e90-aae8-b903a29d6898\" (UID: \"b46f0d10-b6df-4e90-aae8-b903a29d6898\") " Nov 28 10:16:11 crc kubenswrapper[4838]: I1128 10:16:11.488991 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b46f0d10-b6df-4e90-aae8-b903a29d6898-combined-ca-bundle\") pod \"b46f0d10-b6df-4e90-aae8-b903a29d6898\" (UID: \"b46f0d10-b6df-4e90-aae8-b903a29d6898\") " Nov 28 10:16:11 crc kubenswrapper[4838]: I1128 10:16:11.496300 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b46f0d10-b6df-4e90-aae8-b903a29d6898-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "b46f0d10-b6df-4e90-aae8-b903a29d6898" (UID: "b46f0d10-b6df-4e90-aae8-b903a29d6898"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:16:11 crc kubenswrapper[4838]: I1128 10:16:11.496372 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b46f0d10-b6df-4e90-aae8-b903a29d6898-kube-api-access-jl6qh" (OuterVolumeSpecName: "kube-api-access-jl6qh") pod "b46f0d10-b6df-4e90-aae8-b903a29d6898" (UID: "b46f0d10-b6df-4e90-aae8-b903a29d6898"). InnerVolumeSpecName "kube-api-access-jl6qh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:16:11 crc kubenswrapper[4838]: I1128 10:16:11.541684 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b46f0d10-b6df-4e90-aae8-b903a29d6898-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b46f0d10-b6df-4e90-aae8-b903a29d6898" (UID: "b46f0d10-b6df-4e90-aae8-b903a29d6898"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:16:11 crc kubenswrapper[4838]: I1128 10:16:11.548087 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b46f0d10-b6df-4e90-aae8-b903a29d6898-config-data" (OuterVolumeSpecName: "config-data") pod "b46f0d10-b6df-4e90-aae8-b903a29d6898" (UID: "b46f0d10-b6df-4e90-aae8-b903a29d6898"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:16:11 crc kubenswrapper[4838]: I1128 10:16:11.591211 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b46f0d10-b6df-4e90-aae8-b903a29d6898-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:11 crc kubenswrapper[4838]: I1128 10:16:11.591253 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b46f0d10-b6df-4e90-aae8-b903a29d6898-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:11 crc kubenswrapper[4838]: I1128 10:16:11.591271 4838 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b46f0d10-b6df-4e90-aae8-b903a29d6898-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:11 crc kubenswrapper[4838]: I1128 10:16:11.591289 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jl6qh\" (UniqueName: \"kubernetes.io/projected/b46f0d10-b6df-4e90-aae8-b903a29d6898-kube-api-access-jl6qh\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.004520 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-98gmp" event={"ID":"b46f0d10-b6df-4e90-aae8-b903a29d6898","Type":"ContainerDied","Data":"a34088ee791dedba57d0708018576e07f82f75ab6e6189a85773e30802f941be"} Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.004581 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-98gmp" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.004586 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a34088ee791dedba57d0708018576e07f82f75ab6e6189a85773e30802f941be" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.611904 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-54f9b7b8d9-kxcgn"] Nov 28 10:16:12 crc kubenswrapper[4838]: E1128 10:16:12.612508 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1eb1068d-2c78-4baa-9bb3-d21a9181e5db" containerName="ovn-config" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.612520 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="1eb1068d-2c78-4baa-9bb3-d21a9181e5db" containerName="ovn-config" Nov 28 10:16:12 crc kubenswrapper[4838]: E1128 10:16:12.612540 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b46f0d10-b6df-4e90-aae8-b903a29d6898" containerName="glance-db-sync" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.612546 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="b46f0d10-b6df-4e90-aae8-b903a29d6898" containerName="glance-db-sync" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.612684 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="b46f0d10-b6df-4e90-aae8-b903a29d6898" containerName="glance-db-sync" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.612706 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="1eb1068d-2c78-4baa-9bb3-d21a9181e5db" containerName="ovn-config" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.613534 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.641208 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54f9b7b8d9-kxcgn"] Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.714416 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-ovsdbserver-nb\") pod \"dnsmasq-dns-54f9b7b8d9-kxcgn\" (UID: \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.714460 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-config\") pod \"dnsmasq-dns-54f9b7b8d9-kxcgn\" (UID: \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.714501 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-dns-svc\") pod \"dnsmasq-dns-54f9b7b8d9-kxcgn\" (UID: \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.714589 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2mm7\" (UniqueName: \"kubernetes.io/projected/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-kube-api-access-t2mm7\") pod \"dnsmasq-dns-54f9b7b8d9-kxcgn\" (UID: \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\") " 
pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.714608 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-ovsdbserver-sb\") pod \"dnsmasq-dns-54f9b7b8d9-kxcgn\" (UID: \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.816050 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-dns-svc\") pod \"dnsmasq-dns-54f9b7b8d9-kxcgn\" (UID: \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.816163 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2mm7\" (UniqueName: \"kubernetes.io/projected/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-kube-api-access-t2mm7\") pod \"dnsmasq-dns-54f9b7b8d9-kxcgn\" (UID: \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.816192 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-ovsdbserver-sb\") pod \"dnsmasq-dns-54f9b7b8d9-kxcgn\" (UID: \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.816222 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-ovsdbserver-nb\") pod \"dnsmasq-dns-54f9b7b8d9-kxcgn\" (UID: \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.816242 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-config\") pod \"dnsmasq-dns-54f9b7b8d9-kxcgn\" (UID: \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.817128 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-dns-svc\") pod \"dnsmasq-dns-54f9b7b8d9-kxcgn\" (UID: \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.817192 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-ovsdbserver-sb\") pod \"dnsmasq-dns-54f9b7b8d9-kxcgn\" (UID: \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.817218 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-config\") pod \"dnsmasq-dns-54f9b7b8d9-kxcgn\" (UID: \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 
10:16:12.817188 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-ovsdbserver-nb\") pod \"dnsmasq-dns-54f9b7b8d9-kxcgn\" (UID: \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.835935 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2mm7\" (UniqueName: \"kubernetes.io/projected/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-kube-api-access-t2mm7\") pod \"dnsmasq-dns-54f9b7b8d9-kxcgn\" (UID: \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\") " pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" Nov 28 10:16:12 crc kubenswrapper[4838]: I1128 10:16:12.933478 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" Nov 28 10:16:13 crc kubenswrapper[4838]: I1128 10:16:13.344313 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54f9b7b8d9-kxcgn"] Nov 28 10:16:13 crc kubenswrapper[4838]: W1128 10:16:13.354911 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2e5b70fe_78a6_4c9a_b0fb_77f1d0e7c2d2.slice/crio-9e58bb5fa67f7adde39e1d74c14ca5c6d0d892929313f03becb784f251e5605b WatchSource:0}: Error finding container 9e58bb5fa67f7adde39e1d74c14ca5c6d0d892929313f03becb784f251e5605b: Status 404 returned error can't find the container with id 9e58bb5fa67f7adde39e1d74c14ca5c6d0d892929313f03becb784f251e5605b Nov 28 10:16:14 crc kubenswrapper[4838]: I1128 10:16:14.036262 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" event={"ID":"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2","Type":"ContainerStarted","Data":"9e58bb5fa67f7adde39e1d74c14ca5c6d0d892929313f03becb784f251e5605b"} Nov 28 10:16:17 crc kubenswrapper[4838]: I1128 10:16:17.062599 4838 generic.go:334] "Generic (PLEG): container finished" podID="2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2" containerID="e039323569056de17135c4b6cfcccdad13e28b3a15678ee89c93c65fb83260cb" exitCode=0 Nov 28 10:16:17 crc kubenswrapper[4838]: I1128 10:16:17.062758 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" event={"ID":"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2","Type":"ContainerDied","Data":"e039323569056de17135c4b6cfcccdad13e28b3a15678ee89c93c65fb83260cb"} Nov 28 10:16:17 crc kubenswrapper[4838]: I1128 10:16:17.682956 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:16:17 crc kubenswrapper[4838]: I1128 10:16:17.991926 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 28 10:16:18 crc kubenswrapper[4838]: I1128 10:16:18.072644 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" event={"ID":"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2","Type":"ContainerStarted","Data":"f50eef4ef7b6d2f9da4b3f4a982f5900bae1db40cf0ac32340b73745f9f95797"} Nov 28 10:16:18 crc kubenswrapper[4838]: I1128 10:16:18.072803 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" Nov 28 10:16:18 crc kubenswrapper[4838]: I1128 10:16:18.098196 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" podStartSLOduration=6.098175734 
podStartE2EDuration="6.098175734s" podCreationTimestamp="2025-11-28 10:16:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:16:18.096288893 +0000 UTC m=+1149.795263073" watchObservedRunningTime="2025-11-28 10:16:18.098175734 +0000 UTC m=+1149.797149904" Nov 28 10:16:19 crc kubenswrapper[4838]: I1128 10:16:19.919936 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-jbjbb"] Nov 28 10:16:19 crc kubenswrapper[4838]: I1128 10:16:19.921222 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-jbjbb" Nov 28 10:16:19 crc kubenswrapper[4838]: I1128 10:16:19.927457 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-jbjbb"] Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.016552 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-pnr4n"] Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.019335 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-pnr4n" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.027094 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-pnr4n"] Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.036268 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bljh\" (UniqueName: \"kubernetes.io/projected/47572f0c-e812-4be7-b4de-2a4a1045553f-kube-api-access-4bljh\") pod \"cinder-db-create-jbjbb\" (UID: \"47572f0c-e812-4be7-b4de-2a4a1045553f\") " pod="openstack/cinder-db-create-jbjbb" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.044975 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47572f0c-e812-4be7-b4de-2a4a1045553f-operator-scripts\") pod \"cinder-db-create-jbjbb\" (UID: \"47572f0c-e812-4be7-b4de-2a4a1045553f\") " pod="openstack/cinder-db-create-jbjbb" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.132912 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-f31a-account-create-update-nk65p"] Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.133825 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-f31a-account-create-update-nk65p" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.135364 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.143861 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-f31a-account-create-update-nk65p"] Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.150243 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmq8c\" (UniqueName: \"kubernetes.io/projected/43c88e97-d64f-4155-bd4a-691c588527b2-kube-api-access-rmq8c\") pod \"barbican-db-create-pnr4n\" (UID: \"43c88e97-d64f-4155-bd4a-691c588527b2\") " pod="openstack/barbican-db-create-pnr4n" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.150433 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bljh\" (UniqueName: \"kubernetes.io/projected/47572f0c-e812-4be7-b4de-2a4a1045553f-kube-api-access-4bljh\") pod \"cinder-db-create-jbjbb\" (UID: \"47572f0c-e812-4be7-b4de-2a4a1045553f\") " pod="openstack/cinder-db-create-jbjbb" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.150652 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43c88e97-d64f-4155-bd4a-691c588527b2-operator-scripts\") pod \"barbican-db-create-pnr4n\" (UID: \"43c88e97-d64f-4155-bd4a-691c588527b2\") " pod="openstack/barbican-db-create-pnr4n" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.150789 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47572f0c-e812-4be7-b4de-2a4a1045553f-operator-scripts\") pod \"cinder-db-create-jbjbb\" (UID: \"47572f0c-e812-4be7-b4de-2a4a1045553f\") " pod="openstack/cinder-db-create-jbjbb" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.151859 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47572f0c-e812-4be7-b4de-2a4a1045553f-operator-scripts\") pod \"cinder-db-create-jbjbb\" (UID: \"47572f0c-e812-4be7-b4de-2a4a1045553f\") " pod="openstack/cinder-db-create-jbjbb" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.178578 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bljh\" (UniqueName: \"kubernetes.io/projected/47572f0c-e812-4be7-b4de-2a4a1045553f-kube-api-access-4bljh\") pod \"cinder-db-create-jbjbb\" (UID: \"47572f0c-e812-4be7-b4de-2a4a1045553f\") " pod="openstack/cinder-db-create-jbjbb" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.233608 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-8c24-account-create-update-grgdd"] Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.235104 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-8c24-account-create-update-grgdd" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.240771 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.242495 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8c24-account-create-update-grgdd"] Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.252204 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmq8c\" (UniqueName: \"kubernetes.io/projected/43c88e97-d64f-4155-bd4a-691c588527b2-kube-api-access-rmq8c\") pod \"barbican-db-create-pnr4n\" (UID: \"43c88e97-d64f-4155-bd4a-691c588527b2\") " pod="openstack/barbican-db-create-pnr4n" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.252289 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7q7c\" (UniqueName: \"kubernetes.io/projected/da642c1a-1fea-497a-85af-f966ce5ddaad-kube-api-access-q7q7c\") pod \"cinder-f31a-account-create-update-nk65p\" (UID: \"da642c1a-1fea-497a-85af-f966ce5ddaad\") " pod="openstack/cinder-f31a-account-create-update-nk65p" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.252317 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da642c1a-1fea-497a-85af-f966ce5ddaad-operator-scripts\") pod \"cinder-f31a-account-create-update-nk65p\" (UID: \"da642c1a-1fea-497a-85af-f966ce5ddaad\") " pod="openstack/cinder-f31a-account-create-update-nk65p" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.252349 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43c88e97-d64f-4155-bd4a-691c588527b2-operator-scripts\") pod \"barbican-db-create-pnr4n\" (UID: \"43c88e97-d64f-4155-bd4a-691c588527b2\") " pod="openstack/barbican-db-create-pnr4n" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.253000 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43c88e97-d64f-4155-bd4a-691c588527b2-operator-scripts\") pod \"barbican-db-create-pnr4n\" (UID: \"43c88e97-d64f-4155-bd4a-691c588527b2\") " pod="openstack/barbican-db-create-pnr4n" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.270235 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmq8c\" (UniqueName: \"kubernetes.io/projected/43c88e97-d64f-4155-bd4a-691c588527b2-kube-api-access-rmq8c\") pod \"barbican-db-create-pnr4n\" (UID: \"43c88e97-d64f-4155-bd4a-691c588527b2\") " pod="openstack/barbican-db-create-pnr4n" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.282474 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-z98d5"] Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.283683 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-z98d5" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.286126 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.286396 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-2bjp9" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.286671 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.287776 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.297835 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-jbjbb" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.301246 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-z98d5"] Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.350322 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-e066-account-create-update-t26s2"] Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.351582 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-e066-account-create-update-t26s2" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.352126 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-pnr4n" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.353117 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sj46w\" (UniqueName: \"kubernetes.io/projected/44f940a0-f05c-4d6a-b5d3-310bec612088-kube-api-access-sj46w\") pod \"neutron-8c24-account-create-update-grgdd\" (UID: \"44f940a0-f05c-4d6a-b5d3-310bec612088\") " pod="openstack/neutron-8c24-account-create-update-grgdd" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.353146 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c451dac-becb-4d7e-ae6b-4ee9864113b6-combined-ca-bundle\") pod \"keystone-db-sync-z98d5\" (UID: \"4c451dac-becb-4d7e-ae6b-4ee9864113b6\") " pod="openstack/keystone-db-sync-z98d5" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.353167 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gvd89\" (UniqueName: \"kubernetes.io/projected/4c451dac-becb-4d7e-ae6b-4ee9864113b6-kube-api-access-gvd89\") pod \"keystone-db-sync-z98d5\" (UID: \"4c451dac-becb-4d7e-ae6b-4ee9864113b6\") " pod="openstack/keystone-db-sync-z98d5" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.353200 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7q7c\" (UniqueName: \"kubernetes.io/projected/da642c1a-1fea-497a-85af-f966ce5ddaad-kube-api-access-q7q7c\") pod \"cinder-f31a-account-create-update-nk65p\" (UID: \"da642c1a-1fea-497a-85af-f966ce5ddaad\") " pod="openstack/cinder-f31a-account-create-update-nk65p" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.353224 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da642c1a-1fea-497a-85af-f966ce5ddaad-operator-scripts\") pod 
\"cinder-f31a-account-create-update-nk65p\" (UID: \"da642c1a-1fea-497a-85af-f966ce5ddaad\") " pod="openstack/cinder-f31a-account-create-update-nk65p" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.353252 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/44f940a0-f05c-4d6a-b5d3-310bec612088-operator-scripts\") pod \"neutron-8c24-account-create-update-grgdd\" (UID: \"44f940a0-f05c-4d6a-b5d3-310bec612088\") " pod="openstack/neutron-8c24-account-create-update-grgdd" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.353274 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c451dac-becb-4d7e-ae6b-4ee9864113b6-config-data\") pod \"keystone-db-sync-z98d5\" (UID: \"4c451dac-becb-4d7e-ae6b-4ee9864113b6\") " pod="openstack/keystone-db-sync-z98d5" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.354302 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da642c1a-1fea-497a-85af-f966ce5ddaad-operator-scripts\") pod \"cinder-f31a-account-create-update-nk65p\" (UID: \"da642c1a-1fea-497a-85af-f966ce5ddaad\") " pod="openstack/cinder-f31a-account-create-update-nk65p" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.359842 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.364274 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-e066-account-create-update-t26s2"] Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.383410 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-27vq9"] Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.384513 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-27vq9" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.389773 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-27vq9"] Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.390185 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7q7c\" (UniqueName: \"kubernetes.io/projected/da642c1a-1fea-497a-85af-f966ce5ddaad-kube-api-access-q7q7c\") pod \"cinder-f31a-account-create-update-nk65p\" (UID: \"da642c1a-1fea-497a-85af-f966ce5ddaad\") " pod="openstack/cinder-f31a-account-create-update-nk65p" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.451845 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-f31a-account-create-update-nk65p" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.457018 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xkrd\" (UniqueName: \"kubernetes.io/projected/35d4d06e-5f03-40aa-9363-17421ac37e64-kube-api-access-2xkrd\") pod \"barbican-e066-account-create-update-t26s2\" (UID: \"35d4d06e-5f03-40aa-9363-17421ac37e64\") " pod="openstack/barbican-e066-account-create-update-t26s2" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.457068 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/44f940a0-f05c-4d6a-b5d3-310bec612088-operator-scripts\") pod \"neutron-8c24-account-create-update-grgdd\" (UID: \"44f940a0-f05c-4d6a-b5d3-310bec612088\") " pod="openstack/neutron-8c24-account-create-update-grgdd" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.457104 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c451dac-becb-4d7e-ae6b-4ee9864113b6-config-data\") pod \"keystone-db-sync-z98d5\" (UID: \"4c451dac-becb-4d7e-ae6b-4ee9864113b6\") " pod="openstack/keystone-db-sync-z98d5" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.457163 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzbr5\" (UniqueName: \"kubernetes.io/projected/8a69eff2-209f-4644-95ac-d9490f525525-kube-api-access-fzbr5\") pod \"neutron-db-create-27vq9\" (UID: \"8a69eff2-209f-4644-95ac-d9490f525525\") " pod="openstack/neutron-db-create-27vq9" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.457216 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a69eff2-209f-4644-95ac-d9490f525525-operator-scripts\") pod \"neutron-db-create-27vq9\" (UID: \"8a69eff2-209f-4644-95ac-d9490f525525\") " pod="openstack/neutron-db-create-27vq9" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.457258 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sj46w\" (UniqueName: \"kubernetes.io/projected/44f940a0-f05c-4d6a-b5d3-310bec612088-kube-api-access-sj46w\") pod \"neutron-8c24-account-create-update-grgdd\" (UID: \"44f940a0-f05c-4d6a-b5d3-310bec612088\") " pod="openstack/neutron-8c24-account-create-update-grgdd" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.457278 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c451dac-becb-4d7e-ae6b-4ee9864113b6-combined-ca-bundle\") pod \"keystone-db-sync-z98d5\" (UID: \"4c451dac-becb-4d7e-ae6b-4ee9864113b6\") " pod="openstack/keystone-db-sync-z98d5" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.457301 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gvd89\" (UniqueName: \"kubernetes.io/projected/4c451dac-becb-4d7e-ae6b-4ee9864113b6-kube-api-access-gvd89\") pod \"keystone-db-sync-z98d5\" (UID: \"4c451dac-becb-4d7e-ae6b-4ee9864113b6\") " pod="openstack/keystone-db-sync-z98d5" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.457323 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/35d4d06e-5f03-40aa-9363-17421ac37e64-operator-scripts\") pod \"barbican-e066-account-create-update-t26s2\" (UID: \"35d4d06e-5f03-40aa-9363-17421ac37e64\") " pod="openstack/barbican-e066-account-create-update-t26s2" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.460662 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/44f940a0-f05c-4d6a-b5d3-310bec612088-operator-scripts\") pod \"neutron-8c24-account-create-update-grgdd\" (UID: \"44f940a0-f05c-4d6a-b5d3-310bec612088\") " pod="openstack/neutron-8c24-account-create-update-grgdd" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.472422 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c451dac-becb-4d7e-ae6b-4ee9864113b6-combined-ca-bundle\") pod \"keystone-db-sync-z98d5\" (UID: \"4c451dac-becb-4d7e-ae6b-4ee9864113b6\") " pod="openstack/keystone-db-sync-z98d5" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.477842 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gvd89\" (UniqueName: \"kubernetes.io/projected/4c451dac-becb-4d7e-ae6b-4ee9864113b6-kube-api-access-gvd89\") pod \"keystone-db-sync-z98d5\" (UID: \"4c451dac-becb-4d7e-ae6b-4ee9864113b6\") " pod="openstack/keystone-db-sync-z98d5" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.478124 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c451dac-becb-4d7e-ae6b-4ee9864113b6-config-data\") pod \"keystone-db-sync-z98d5\" (UID: \"4c451dac-becb-4d7e-ae6b-4ee9864113b6\") " pod="openstack/keystone-db-sync-z98d5" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.480725 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sj46w\" (UniqueName: \"kubernetes.io/projected/44f940a0-f05c-4d6a-b5d3-310bec612088-kube-api-access-sj46w\") pod \"neutron-8c24-account-create-update-grgdd\" (UID: \"44f940a0-f05c-4d6a-b5d3-310bec612088\") " pod="openstack/neutron-8c24-account-create-update-grgdd" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.555239 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-8c24-account-create-update-grgdd" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.560598 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xkrd\" (UniqueName: \"kubernetes.io/projected/35d4d06e-5f03-40aa-9363-17421ac37e64-kube-api-access-2xkrd\") pod \"barbican-e066-account-create-update-t26s2\" (UID: \"35d4d06e-5f03-40aa-9363-17421ac37e64\") " pod="openstack/barbican-e066-account-create-update-t26s2" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.560706 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzbr5\" (UniqueName: \"kubernetes.io/projected/8a69eff2-209f-4644-95ac-d9490f525525-kube-api-access-fzbr5\") pod \"neutron-db-create-27vq9\" (UID: \"8a69eff2-209f-4644-95ac-d9490f525525\") " pod="openstack/neutron-db-create-27vq9" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.560791 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a69eff2-209f-4644-95ac-d9490f525525-operator-scripts\") pod \"neutron-db-create-27vq9\" (UID: \"8a69eff2-209f-4644-95ac-d9490f525525\") " pod="openstack/neutron-db-create-27vq9" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.560866 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35d4d06e-5f03-40aa-9363-17421ac37e64-operator-scripts\") pod \"barbican-e066-account-create-update-t26s2\" (UID: \"35d4d06e-5f03-40aa-9363-17421ac37e64\") " pod="openstack/barbican-e066-account-create-update-t26s2" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.562230 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a69eff2-209f-4644-95ac-d9490f525525-operator-scripts\") pod \"neutron-db-create-27vq9\" (UID: \"8a69eff2-209f-4644-95ac-d9490f525525\") " pod="openstack/neutron-db-create-27vq9" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.562463 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35d4d06e-5f03-40aa-9363-17421ac37e64-operator-scripts\") pod \"barbican-e066-account-create-update-t26s2\" (UID: \"35d4d06e-5f03-40aa-9363-17421ac37e64\") " pod="openstack/barbican-e066-account-create-update-t26s2" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.578840 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xkrd\" (UniqueName: \"kubernetes.io/projected/35d4d06e-5f03-40aa-9363-17421ac37e64-kube-api-access-2xkrd\") pod \"barbican-e066-account-create-update-t26s2\" (UID: \"35d4d06e-5f03-40aa-9363-17421ac37e64\") " pod="openstack/barbican-e066-account-create-update-t26s2" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.580448 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzbr5\" (UniqueName: \"kubernetes.io/projected/8a69eff2-209f-4644-95ac-d9490f525525-kube-api-access-fzbr5\") pod \"neutron-db-create-27vq9\" (UID: \"8a69eff2-209f-4644-95ac-d9490f525525\") " pod="openstack/neutron-db-create-27vq9" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.752570 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-z98d5" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.785283 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-e066-account-create-update-t26s2" Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.790439 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-jbjbb"] Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.793399 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-27vq9" Nov 28 10:16:20 crc kubenswrapper[4838]: W1128 10:16:20.817457 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod47572f0c_e812_4be7_b4de_2a4a1045553f.slice/crio-6c03ed8a43193cc28d3275c5f31aced51a2664771af3b3462a5e8a839d34f755 WatchSource:0}: Error finding container 6c03ed8a43193cc28d3275c5f31aced51a2664771af3b3462a5e8a839d34f755: Status 404 returned error can't find the container with id 6c03ed8a43193cc28d3275c5f31aced51a2664771af3b3462a5e8a839d34f755 Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.914113 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-f31a-account-create-update-nk65p"] Nov 28 10:16:20 crc kubenswrapper[4838]: I1128 10:16:20.935676 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-pnr4n"] Nov 28 10:16:20 crc kubenswrapper[4838]: W1128 10:16:20.957257 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod43c88e97_d64f_4155_bd4a_691c588527b2.slice/crio-42e35d8b7a41bed93a97928c21e791a87d1ecea9688fb7647beb714d8211e87e WatchSource:0}: Error finding container 42e35d8b7a41bed93a97928c21e791a87d1ecea9688fb7647beb714d8211e87e: Status 404 returned error can't find the container with id 42e35d8b7a41bed93a97928c21e791a87d1ecea9688fb7647beb714d8211e87e Nov 28 10:16:21 crc kubenswrapper[4838]: I1128 10:16:21.082561 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8c24-account-create-update-grgdd"] Nov 28 10:16:21 crc kubenswrapper[4838]: I1128 10:16:21.100373 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-jbjbb" event={"ID":"47572f0c-e812-4be7-b4de-2a4a1045553f","Type":"ContainerStarted","Data":"1723ce3d5dc802ce733bacd1dce7082bccaa22dfdcb736551d99aa7778a25c4e"} Nov 28 10:16:21 crc kubenswrapper[4838]: I1128 10:16:21.100411 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-jbjbb" event={"ID":"47572f0c-e812-4be7-b4de-2a4a1045553f","Type":"ContainerStarted","Data":"6c03ed8a43193cc28d3275c5f31aced51a2664771af3b3462a5e8a839d34f755"} Nov 28 10:16:21 crc kubenswrapper[4838]: I1128 10:16:21.102495 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-f31a-account-create-update-nk65p" event={"ID":"da642c1a-1fea-497a-85af-f966ce5ddaad","Type":"ContainerStarted","Data":"8a9f1cd248763ea6513575f5d9f006938d043092bd8a26f823f547230764cda0"} Nov 28 10:16:21 crc kubenswrapper[4838]: I1128 10:16:21.103289 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-pnr4n" event={"ID":"43c88e97-d64f-4155-bd4a-691c588527b2","Type":"ContainerStarted","Data":"42e35d8b7a41bed93a97928c21e791a87d1ecea9688fb7647beb714d8211e87e"} Nov 28 10:16:21 crc kubenswrapper[4838]: I1128 10:16:21.107870 4838 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/neutron-8c24-account-create-update-grgdd" event={"ID":"44f940a0-f05c-4d6a-b5d3-310bec612088","Type":"ContainerStarted","Data":"005e5ff469926829b4d206267eb808b41de018d601aa590dedef577c3374aea1"} Nov 28 10:16:21 crc kubenswrapper[4838]: I1128 10:16:21.122580 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-jbjbb" podStartSLOduration=2.122558933 podStartE2EDuration="2.122558933s" podCreationTimestamp="2025-11-28 10:16:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:16:21.111338763 +0000 UTC m=+1152.810312933" watchObservedRunningTime="2025-11-28 10:16:21.122558933 +0000 UTC m=+1152.821533123" Nov 28 10:16:21 crc kubenswrapper[4838]: I1128 10:16:21.278193 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-z98d5"] Nov 28 10:16:21 crc kubenswrapper[4838]: W1128 10:16:21.312843 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c451dac_becb_4d7e_ae6b_4ee9864113b6.slice/crio-ef067562e02f9f26ec8b74fb9a067f9dab0caa780f9f8d41012f0ace374fa021 WatchSource:0}: Error finding container ef067562e02f9f26ec8b74fb9a067f9dab0caa780f9f8d41012f0ace374fa021: Status 404 returned error can't find the container with id ef067562e02f9f26ec8b74fb9a067f9dab0caa780f9f8d41012f0ace374fa021 Nov 28 10:16:21 crc kubenswrapper[4838]: W1128 10:16:21.358082 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a69eff2_209f_4644_95ac_d9490f525525.slice/crio-1e71abd9dbfd814e683b711121af75140045dcaab738a284e54d4e691ae70f0f WatchSource:0}: Error finding container 1e71abd9dbfd814e683b711121af75140045dcaab738a284e54d4e691ae70f0f: Status 404 returned error can't find the container with id 1e71abd9dbfd814e683b711121af75140045dcaab738a284e54d4e691ae70f0f Nov 28 10:16:21 crc kubenswrapper[4838]: I1128 10:16:21.363740 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-27vq9"] Nov 28 10:16:21 crc kubenswrapper[4838]: W1128 10:16:21.370096 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod35d4d06e_5f03_40aa_9363_17421ac37e64.slice/crio-080d4dfb513a6b1196cbebdec556a77398f45a795d0b4470eb3e3301b6a1cf57 WatchSource:0}: Error finding container 080d4dfb513a6b1196cbebdec556a77398f45a795d0b4470eb3e3301b6a1cf57: Status 404 returned error can't find the container with id 080d4dfb513a6b1196cbebdec556a77398f45a795d0b4470eb3e3301b6a1cf57 Nov 28 10:16:21 crc kubenswrapper[4838]: I1128 10:16:21.375696 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-e066-account-create-update-t26s2"] Nov 28 10:16:22 crc kubenswrapper[4838]: I1128 10:16:22.115515 4838 generic.go:334] "Generic (PLEG): container finished" podID="47572f0c-e812-4be7-b4de-2a4a1045553f" containerID="1723ce3d5dc802ce733bacd1dce7082bccaa22dfdcb736551d99aa7778a25c4e" exitCode=0 Nov 28 10:16:22 crc kubenswrapper[4838]: I1128 10:16:22.115576 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-jbjbb" event={"ID":"47572f0c-e812-4be7-b4de-2a4a1045553f","Type":"ContainerDied","Data":"1723ce3d5dc802ce733bacd1dce7082bccaa22dfdcb736551d99aa7778a25c4e"} Nov 28 10:16:22 crc kubenswrapper[4838]: I1128 10:16:22.117859 4838 generic.go:334] "Generic 
(PLEG): container finished" podID="da642c1a-1fea-497a-85af-f966ce5ddaad" containerID="92361b6868108e2b152342a4687d4a9e4a553b690331375fdf73138aa8897d11" exitCode=0 Nov 28 10:16:22 crc kubenswrapper[4838]: I1128 10:16:22.117914 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-f31a-account-create-update-nk65p" event={"ID":"da642c1a-1fea-497a-85af-f966ce5ddaad","Type":"ContainerDied","Data":"92361b6868108e2b152342a4687d4a9e4a553b690331375fdf73138aa8897d11"} Nov 28 10:16:22 crc kubenswrapper[4838]: I1128 10:16:22.119423 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-z98d5" event={"ID":"4c451dac-becb-4d7e-ae6b-4ee9864113b6","Type":"ContainerStarted","Data":"ef067562e02f9f26ec8b74fb9a067f9dab0caa780f9f8d41012f0ace374fa021"} Nov 28 10:16:22 crc kubenswrapper[4838]: I1128 10:16:22.121822 4838 generic.go:334] "Generic (PLEG): container finished" podID="35d4d06e-5f03-40aa-9363-17421ac37e64" containerID="6bc60708c6d9af83c454a275a74a4e0638bf9b4d6ea4add004e86546538258cb" exitCode=0 Nov 28 10:16:22 crc kubenswrapper[4838]: I1128 10:16:22.121920 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-e066-account-create-update-t26s2" event={"ID":"35d4d06e-5f03-40aa-9363-17421ac37e64","Type":"ContainerDied","Data":"6bc60708c6d9af83c454a275a74a4e0638bf9b4d6ea4add004e86546538258cb"} Nov 28 10:16:22 crc kubenswrapper[4838]: I1128 10:16:22.121943 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-e066-account-create-update-t26s2" event={"ID":"35d4d06e-5f03-40aa-9363-17421ac37e64","Type":"ContainerStarted","Data":"080d4dfb513a6b1196cbebdec556a77398f45a795d0b4470eb3e3301b6a1cf57"} Nov 28 10:16:22 crc kubenswrapper[4838]: I1128 10:16:22.123375 4838 generic.go:334] "Generic (PLEG): container finished" podID="43c88e97-d64f-4155-bd4a-691c588527b2" containerID="d67a5083bff388d76a707e1d845b786425d0ef8b1e67f6174ffb4130fc1239e8" exitCode=0 Nov 28 10:16:22 crc kubenswrapper[4838]: I1128 10:16:22.123424 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-pnr4n" event={"ID":"43c88e97-d64f-4155-bd4a-691c588527b2","Type":"ContainerDied","Data":"d67a5083bff388d76a707e1d845b786425d0ef8b1e67f6174ffb4130fc1239e8"} Nov 28 10:16:22 crc kubenswrapper[4838]: I1128 10:16:22.124886 4838 generic.go:334] "Generic (PLEG): container finished" podID="44f940a0-f05c-4d6a-b5d3-310bec612088" containerID="fca4003b91f3c167ba46164180040e73f3865609d4bc8a2ca58435b309b5f535" exitCode=0 Nov 28 10:16:22 crc kubenswrapper[4838]: I1128 10:16:22.124923 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8c24-account-create-update-grgdd" event={"ID":"44f940a0-f05c-4d6a-b5d3-310bec612088","Type":"ContainerDied","Data":"fca4003b91f3c167ba46164180040e73f3865609d4bc8a2ca58435b309b5f535"} Nov 28 10:16:22 crc kubenswrapper[4838]: I1128 10:16:22.126277 4838 generic.go:334] "Generic (PLEG): container finished" podID="8a69eff2-209f-4644-95ac-d9490f525525" containerID="5c2d62fce023cec20e9f4c9a6dee170efd85240c407b7988a7e57868731435b2" exitCode=0 Nov 28 10:16:22 crc kubenswrapper[4838]: I1128 10:16:22.126308 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-27vq9" event={"ID":"8a69eff2-209f-4644-95ac-d9490f525525","Type":"ContainerDied","Data":"5c2d62fce023cec20e9f4c9a6dee170efd85240c407b7988a7e57868731435b2"} Nov 28 10:16:22 crc kubenswrapper[4838]: I1128 10:16:22.126325 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/neutron-db-create-27vq9" event={"ID":"8a69eff2-209f-4644-95ac-d9490f525525","Type":"ContainerStarted","Data":"1e71abd9dbfd814e683b711121af75140045dcaab738a284e54d4e691ae70f0f"} Nov 28 10:16:22 crc kubenswrapper[4838]: I1128 10:16:22.935058 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.012739 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-r85mc"] Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.013168 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" podUID="db94f019-727d-48df-a297-7007e4133cf6" containerName="dnsmasq-dns" containerID="cri-o://11d211432847e858379127214f174f46122922ea4f842948fdc12c095cb853b6" gracePeriod=10 Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.137451 4838 generic.go:334] "Generic (PLEG): container finished" podID="db94f019-727d-48df-a297-7007e4133cf6" containerID="11d211432847e858379127214f174f46122922ea4f842948fdc12c095cb853b6" exitCode=0 Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.137571 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" event={"ID":"db94f019-727d-48df-a297-7007e4133cf6","Type":"ContainerDied","Data":"11d211432847e858379127214f174f46122922ea4f842948fdc12c095cb853b6"} Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.519778 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-pnr4n" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.621338 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43c88e97-d64f-4155-bd4a-691c588527b2-operator-scripts\") pod \"43c88e97-d64f-4155-bd4a-691c588527b2\" (UID: \"43c88e97-d64f-4155-bd4a-691c588527b2\") " Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.622554 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43c88e97-d64f-4155-bd4a-691c588527b2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "43c88e97-d64f-4155-bd4a-691c588527b2" (UID: "43c88e97-d64f-4155-bd4a-691c588527b2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.622631 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rmq8c\" (UniqueName: \"kubernetes.io/projected/43c88e97-d64f-4155-bd4a-691c588527b2-kube-api-access-rmq8c\") pod \"43c88e97-d64f-4155-bd4a-691c588527b2\" (UID: \"43c88e97-d64f-4155-bd4a-691c588527b2\") " Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.627131 4838 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43c88e97-d64f-4155-bd4a-691c588527b2-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.635010 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43c88e97-d64f-4155-bd4a-691c588527b2-kube-api-access-rmq8c" (OuterVolumeSpecName: "kube-api-access-rmq8c") pod "43c88e97-d64f-4155-bd4a-691c588527b2" (UID: "43c88e97-d64f-4155-bd4a-691c588527b2"). InnerVolumeSpecName "kube-api-access-rmq8c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.729038 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rmq8c\" (UniqueName: \"kubernetes.io/projected/43c88e97-d64f-4155-bd4a-691c588527b2-kube-api-access-rmq8c\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.740472 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-27vq9" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.747754 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-jbjbb" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.761101 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.774989 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-e066-account-create-update-t26s2" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.782871 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8c24-account-create-update-grgdd" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.788951 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-f31a-account-create-update-nk65p" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.830557 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-ovsdbserver-nb\") pod \"db94f019-727d-48df-a297-7007e4133cf6\" (UID: \"db94f019-727d-48df-a297-7007e4133cf6\") " Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.830618 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35d4d06e-5f03-40aa-9363-17421ac37e64-operator-scripts\") pod \"35d4d06e-5f03-40aa-9363-17421ac37e64\" (UID: \"35d4d06e-5f03-40aa-9363-17421ac37e64\") " Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.830649 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-dns-svc\") pod \"db94f019-727d-48df-a297-7007e4133cf6\" (UID: \"db94f019-727d-48df-a297-7007e4133cf6\") " Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.830679 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-config\") pod \"db94f019-727d-48df-a297-7007e4133cf6\" (UID: \"db94f019-727d-48df-a297-7007e4133cf6\") " Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.830767 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fzbr5\" (UniqueName: \"kubernetes.io/projected/8a69eff2-209f-4644-95ac-d9490f525525-kube-api-access-fzbr5\") pod \"8a69eff2-209f-4644-95ac-d9490f525525\" (UID: \"8a69eff2-209f-4644-95ac-d9490f525525\") " Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.830787 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47572f0c-e812-4be7-b4de-2a4a1045553f-operator-scripts\") pod 
\"47572f0c-e812-4be7-b4de-2a4a1045553f\" (UID: \"47572f0c-e812-4be7-b4de-2a4a1045553f\") " Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.830828 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9pxb4\" (UniqueName: \"kubernetes.io/projected/db94f019-727d-48df-a297-7007e4133cf6-kube-api-access-9pxb4\") pod \"db94f019-727d-48df-a297-7007e4133cf6\" (UID: \"db94f019-727d-48df-a297-7007e4133cf6\") " Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.830902 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4bljh\" (UniqueName: \"kubernetes.io/projected/47572f0c-e812-4be7-b4de-2a4a1045553f-kube-api-access-4bljh\") pod \"47572f0c-e812-4be7-b4de-2a4a1045553f\" (UID: \"47572f0c-e812-4be7-b4de-2a4a1045553f\") " Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.830927 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2xkrd\" (UniqueName: \"kubernetes.io/projected/35d4d06e-5f03-40aa-9363-17421ac37e64-kube-api-access-2xkrd\") pod \"35d4d06e-5f03-40aa-9363-17421ac37e64\" (UID: \"35d4d06e-5f03-40aa-9363-17421ac37e64\") " Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.830970 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a69eff2-209f-4644-95ac-d9490f525525-operator-scripts\") pod \"8a69eff2-209f-4644-95ac-d9490f525525\" (UID: \"8a69eff2-209f-4644-95ac-d9490f525525\") " Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.830985 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-ovsdbserver-sb\") pod \"db94f019-727d-48df-a297-7007e4133cf6\" (UID: \"db94f019-727d-48df-a297-7007e4133cf6\") " Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.834310 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/35d4d06e-5f03-40aa-9363-17421ac37e64-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "35d4d06e-5f03-40aa-9363-17421ac37e64" (UID: "35d4d06e-5f03-40aa-9363-17421ac37e64"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.834649 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47572f0c-e812-4be7-b4de-2a4a1045553f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "47572f0c-e812-4be7-b4de-2a4a1045553f" (UID: "47572f0c-e812-4be7-b4de-2a4a1045553f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.845271 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a69eff2-209f-4644-95ac-d9490f525525-kube-api-access-fzbr5" (OuterVolumeSpecName: "kube-api-access-fzbr5") pod "8a69eff2-209f-4644-95ac-d9490f525525" (UID: "8a69eff2-209f-4644-95ac-d9490f525525"). InnerVolumeSpecName "kube-api-access-fzbr5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.845803 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a69eff2-209f-4644-95ac-d9490f525525-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8a69eff2-209f-4644-95ac-d9490f525525" (UID: "8a69eff2-209f-4644-95ac-d9490f525525"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.846818 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35d4d06e-5f03-40aa-9363-17421ac37e64-kube-api-access-2xkrd" (OuterVolumeSpecName: "kube-api-access-2xkrd") pod "35d4d06e-5f03-40aa-9363-17421ac37e64" (UID: "35d4d06e-5f03-40aa-9363-17421ac37e64"). InnerVolumeSpecName "kube-api-access-2xkrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.849886 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47572f0c-e812-4be7-b4de-2a4a1045553f-kube-api-access-4bljh" (OuterVolumeSpecName: "kube-api-access-4bljh") pod "47572f0c-e812-4be7-b4de-2a4a1045553f" (UID: "47572f0c-e812-4be7-b4de-2a4a1045553f"). InnerVolumeSpecName "kube-api-access-4bljh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.855847 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db94f019-727d-48df-a297-7007e4133cf6-kube-api-access-9pxb4" (OuterVolumeSpecName: "kube-api-access-9pxb4") pod "db94f019-727d-48df-a297-7007e4133cf6" (UID: "db94f019-727d-48df-a297-7007e4133cf6"). InnerVolumeSpecName "kube-api-access-9pxb4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.878532 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-config" (OuterVolumeSpecName: "config") pod "db94f019-727d-48df-a297-7007e4133cf6" (UID: "db94f019-727d-48df-a297-7007e4133cf6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.883258 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "db94f019-727d-48df-a297-7007e4133cf6" (UID: "db94f019-727d-48df-a297-7007e4133cf6"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.886880 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "db94f019-727d-48df-a297-7007e4133cf6" (UID: "db94f019-727d-48df-a297-7007e4133cf6"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.895532 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "db94f019-727d-48df-a297-7007e4133cf6" (UID: "db94f019-727d-48df-a297-7007e4133cf6"). 
InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.932347 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q7q7c\" (UniqueName: \"kubernetes.io/projected/da642c1a-1fea-497a-85af-f966ce5ddaad-kube-api-access-q7q7c\") pod \"da642c1a-1fea-497a-85af-f966ce5ddaad\" (UID: \"da642c1a-1fea-497a-85af-f966ce5ddaad\") " Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.932517 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da642c1a-1fea-497a-85af-f966ce5ddaad-operator-scripts\") pod \"da642c1a-1fea-497a-85af-f966ce5ddaad\" (UID: \"da642c1a-1fea-497a-85af-f966ce5ddaad\") " Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.932573 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/44f940a0-f05c-4d6a-b5d3-310bec612088-operator-scripts\") pod \"44f940a0-f05c-4d6a-b5d3-310bec612088\" (UID: \"44f940a0-f05c-4d6a-b5d3-310bec612088\") " Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.932672 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sj46w\" (UniqueName: \"kubernetes.io/projected/44f940a0-f05c-4d6a-b5d3-310bec612088-kube-api-access-sj46w\") pod \"44f940a0-f05c-4d6a-b5d3-310bec612088\" (UID: \"44f940a0-f05c-4d6a-b5d3-310bec612088\") " Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.933178 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.933198 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fzbr5\" (UniqueName: \"kubernetes.io/projected/8a69eff2-209f-4644-95ac-d9490f525525-kube-api-access-fzbr5\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.933210 4838 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47572f0c-e812-4be7-b4de-2a4a1045553f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.933220 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9pxb4\" (UniqueName: \"kubernetes.io/projected/db94f019-727d-48df-a297-7007e4133cf6-kube-api-access-9pxb4\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.933230 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4bljh\" (UniqueName: \"kubernetes.io/projected/47572f0c-e812-4be7-b4de-2a4a1045553f-kube-api-access-4bljh\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.933239 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2xkrd\" (UniqueName: \"kubernetes.io/projected/35d4d06e-5f03-40aa-9363-17421ac37e64-kube-api-access-2xkrd\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.933248 4838 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a69eff2-209f-4644-95ac-d9490f525525-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.933257 4838 
reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.933268 4838 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.933279 4838 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35d4d06e-5f03-40aa-9363-17421ac37e64-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.933307 4838 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db94f019-727d-48df-a297-7007e4133cf6-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.933416 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da642c1a-1fea-497a-85af-f966ce5ddaad-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "da642c1a-1fea-497a-85af-f966ce5ddaad" (UID: "da642c1a-1fea-497a-85af-f966ce5ddaad"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.933528 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44f940a0-f05c-4d6a-b5d3-310bec612088-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "44f940a0-f05c-4d6a-b5d3-310bec612088" (UID: "44f940a0-f05c-4d6a-b5d3-310bec612088"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.935913 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da642c1a-1fea-497a-85af-f966ce5ddaad-kube-api-access-q7q7c" (OuterVolumeSpecName: "kube-api-access-q7q7c") pod "da642c1a-1fea-497a-85af-f966ce5ddaad" (UID: "da642c1a-1fea-497a-85af-f966ce5ddaad"). InnerVolumeSpecName "kube-api-access-q7q7c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:16:23 crc kubenswrapper[4838]: I1128 10:16:23.936329 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44f940a0-f05c-4d6a-b5d3-310bec612088-kube-api-access-sj46w" (OuterVolumeSpecName: "kube-api-access-sj46w") pod "44f940a0-f05c-4d6a-b5d3-310bec612088" (UID: "44f940a0-f05c-4d6a-b5d3-310bec612088"). InnerVolumeSpecName "kube-api-access-sj46w". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.034644 4838 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/da642c1a-1fea-497a-85af-f966ce5ddaad-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.034681 4838 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/44f940a0-f05c-4d6a-b5d3-310bec612088-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.034693 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sj46w\" (UniqueName: \"kubernetes.io/projected/44f940a0-f05c-4d6a-b5d3-310bec612088-kube-api-access-sj46w\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.034707 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q7q7c\" (UniqueName: \"kubernetes.io/projected/da642c1a-1fea-497a-85af-f966ce5ddaad-kube-api-access-q7q7c\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.161040 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" event={"ID":"db94f019-727d-48df-a297-7007e4133cf6","Type":"ContainerDied","Data":"4412dd745cc5c5431ceb7a65635d5c2f5f237cff3e6cf654298847dd72b2f6b8"} Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.161450 4838 scope.go:117] "RemoveContainer" containerID="11d211432847e858379127214f174f46122922ea4f842948fdc12c095cb853b6" Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.161072 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-r85mc" Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.163773 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-27vq9" event={"ID":"8a69eff2-209f-4644-95ac-d9490f525525","Type":"ContainerDied","Data":"1e71abd9dbfd814e683b711121af75140045dcaab738a284e54d4e691ae70f0f"} Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.163814 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1e71abd9dbfd814e683b711121af75140045dcaab738a284e54d4e691ae70f0f" Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.163869 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-27vq9" Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.165410 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-jbjbb" event={"ID":"47572f0c-e812-4be7-b4de-2a4a1045553f","Type":"ContainerDied","Data":"6c03ed8a43193cc28d3275c5f31aced51a2664771af3b3462a5e8a839d34f755"} Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.165435 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6c03ed8a43193cc28d3275c5f31aced51a2664771af3b3462a5e8a839d34f755" Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.165488 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-jbjbb" Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.168004 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-f31a-account-create-update-nk65p" event={"ID":"da642c1a-1fea-497a-85af-f966ce5ddaad","Type":"ContainerDied","Data":"8a9f1cd248763ea6513575f5d9f006938d043092bd8a26f823f547230764cda0"} Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.168872 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8a9f1cd248763ea6513575f5d9f006938d043092bd8a26f823f547230764cda0" Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.169178 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-f31a-account-create-update-nk65p" Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.180599 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-e066-account-create-update-t26s2" event={"ID":"35d4d06e-5f03-40aa-9363-17421ac37e64","Type":"ContainerDied","Data":"080d4dfb513a6b1196cbebdec556a77398f45a795d0b4470eb3e3301b6a1cf57"} Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.180639 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="080d4dfb513a6b1196cbebdec556a77398f45a795d0b4470eb3e3301b6a1cf57" Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.180733 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-e066-account-create-update-t26s2" Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.183068 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-pnr4n" event={"ID":"43c88e97-d64f-4155-bd4a-691c588527b2","Type":"ContainerDied","Data":"42e35d8b7a41bed93a97928c21e791a87d1ecea9688fb7647beb714d8211e87e"} Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.183097 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="42e35d8b7a41bed93a97928c21e791a87d1ecea9688fb7647beb714d8211e87e" Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.183140 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-pnr4n" Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.191828 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8c24-account-create-update-grgdd" event={"ID":"44f940a0-f05c-4d6a-b5d3-310bec612088","Type":"ContainerDied","Data":"005e5ff469926829b4d206267eb808b41de018d601aa590dedef577c3374aea1"} Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.191877 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="005e5ff469926829b4d206267eb808b41de018d601aa590dedef577c3374aea1" Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.191941 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-8c24-account-create-update-grgdd" Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.241645 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-r85mc"] Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.247921 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-r85mc"] Nov 28 10:16:24 crc kubenswrapper[4838]: I1128 10:16:24.581678 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db94f019-727d-48df-a297-7007e4133cf6" path="/var/lib/kubelet/pods/db94f019-727d-48df-a297-7007e4133cf6/volumes" Nov 28 10:16:26 crc kubenswrapper[4838]: I1128 10:16:26.173014 4838 scope.go:117] "RemoveContainer" containerID="f3d207828f4e088114c9b3e3082ff4d147b1a1c27cdac5ba1fb1078f696c631a" Nov 28 10:16:27 crc kubenswrapper[4838]: I1128 10:16:27.231524 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-z98d5" event={"ID":"4c451dac-becb-4d7e-ae6b-4ee9864113b6","Type":"ContainerStarted","Data":"2e70d115c83ef7518fb3d01d81b48bb4b8aa260315d3a261933d60e58503faed"} Nov 28 10:16:27 crc kubenswrapper[4838]: I1128 10:16:27.259453 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-z98d5" podStartSLOduration=2.311715907 podStartE2EDuration="7.259428181s" podCreationTimestamp="2025-11-28 10:16:20 +0000 UTC" firstStartedPulling="2025-11-28 10:16:21.315809965 +0000 UTC m=+1153.014784135" lastFinishedPulling="2025-11-28 10:16:26.263522209 +0000 UTC m=+1157.962496409" observedRunningTime="2025-11-28 10:16:27.257792758 +0000 UTC m=+1158.956766958" watchObservedRunningTime="2025-11-28 10:16:27.259428181 +0000 UTC m=+1158.958402391" Nov 28 10:16:30 crc kubenswrapper[4838]: I1128 10:16:30.272561 4838 generic.go:334] "Generic (PLEG): container finished" podID="4c451dac-becb-4d7e-ae6b-4ee9864113b6" containerID="2e70d115c83ef7518fb3d01d81b48bb4b8aa260315d3a261933d60e58503faed" exitCode=0 Nov 28 10:16:30 crc kubenswrapper[4838]: I1128 10:16:30.272684 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-z98d5" event={"ID":"4c451dac-becb-4d7e-ae6b-4ee9864113b6","Type":"ContainerDied","Data":"2e70d115c83ef7518fb3d01d81b48bb4b8aa260315d3a261933d60e58503faed"} Nov 28 10:16:31 crc kubenswrapper[4838]: I1128 10:16:31.713166 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-z98d5" Nov 28 10:16:31 crc kubenswrapper[4838]: I1128 10:16:31.787949 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gvd89\" (UniqueName: \"kubernetes.io/projected/4c451dac-becb-4d7e-ae6b-4ee9864113b6-kube-api-access-gvd89\") pod \"4c451dac-becb-4d7e-ae6b-4ee9864113b6\" (UID: \"4c451dac-becb-4d7e-ae6b-4ee9864113b6\") " Nov 28 10:16:31 crc kubenswrapper[4838]: I1128 10:16:31.788112 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c451dac-becb-4d7e-ae6b-4ee9864113b6-config-data\") pod \"4c451dac-becb-4d7e-ae6b-4ee9864113b6\" (UID: \"4c451dac-becb-4d7e-ae6b-4ee9864113b6\") " Nov 28 10:16:31 crc kubenswrapper[4838]: I1128 10:16:31.788144 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c451dac-becb-4d7e-ae6b-4ee9864113b6-combined-ca-bundle\") pod \"4c451dac-becb-4d7e-ae6b-4ee9864113b6\" (UID: \"4c451dac-becb-4d7e-ae6b-4ee9864113b6\") " Nov 28 10:16:31 crc kubenswrapper[4838]: I1128 10:16:31.804135 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c451dac-becb-4d7e-ae6b-4ee9864113b6-kube-api-access-gvd89" (OuterVolumeSpecName: "kube-api-access-gvd89") pod "4c451dac-becb-4d7e-ae6b-4ee9864113b6" (UID: "4c451dac-becb-4d7e-ae6b-4ee9864113b6"). InnerVolumeSpecName "kube-api-access-gvd89". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:16:31 crc kubenswrapper[4838]: I1128 10:16:31.816949 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c451dac-becb-4d7e-ae6b-4ee9864113b6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4c451dac-becb-4d7e-ae6b-4ee9864113b6" (UID: "4c451dac-becb-4d7e-ae6b-4ee9864113b6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:16:31 crc kubenswrapper[4838]: I1128 10:16:31.838812 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c451dac-becb-4d7e-ae6b-4ee9864113b6-config-data" (OuterVolumeSpecName: "config-data") pod "4c451dac-becb-4d7e-ae6b-4ee9864113b6" (UID: "4c451dac-becb-4d7e-ae6b-4ee9864113b6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:16:31 crc kubenswrapper[4838]: I1128 10:16:31.890404 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gvd89\" (UniqueName: \"kubernetes.io/projected/4c451dac-becb-4d7e-ae6b-4ee9864113b6-kube-api-access-gvd89\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:31 crc kubenswrapper[4838]: I1128 10:16:31.890443 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c451dac-becb-4d7e-ae6b-4ee9864113b6-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:31 crc kubenswrapper[4838]: I1128 10:16:31.890457 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c451dac-becb-4d7e-ae6b-4ee9864113b6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.310494 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-z98d5" event={"ID":"4c451dac-becb-4d7e-ae6b-4ee9864113b6","Type":"ContainerDied","Data":"ef067562e02f9f26ec8b74fb9a067f9dab0caa780f9f8d41012f0ace374fa021"} Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.310550 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ef067562e02f9f26ec8b74fb9a067f9dab0caa780f9f8d41012f0ace374fa021" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.310639 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-z98d5" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.610443 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6546db6db7-wstkp"] Nov 28 10:16:32 crc kubenswrapper[4838]: E1128 10:16:32.610799 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db94f019-727d-48df-a297-7007e4133cf6" containerName="dnsmasq-dns" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.610810 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="db94f019-727d-48df-a297-7007e4133cf6" containerName="dnsmasq-dns" Nov 28 10:16:32 crc kubenswrapper[4838]: E1128 10:16:32.610823 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47572f0c-e812-4be7-b4de-2a4a1045553f" containerName="mariadb-database-create" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.610829 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="47572f0c-e812-4be7-b4de-2a4a1045553f" containerName="mariadb-database-create" Nov 28 10:16:32 crc kubenswrapper[4838]: E1128 10:16:32.610848 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c451dac-becb-4d7e-ae6b-4ee9864113b6" containerName="keystone-db-sync" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.610854 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c451dac-becb-4d7e-ae6b-4ee9864113b6" containerName="keystone-db-sync" Nov 28 10:16:32 crc kubenswrapper[4838]: E1128 10:16:32.610865 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da642c1a-1fea-497a-85af-f966ce5ddaad" containerName="mariadb-account-create-update" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.610873 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="da642c1a-1fea-497a-85af-f966ce5ddaad" containerName="mariadb-account-create-update" Nov 28 10:16:32 crc kubenswrapper[4838]: E1128 10:16:32.610885 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db94f019-727d-48df-a297-7007e4133cf6" 
containerName="init" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.610892 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="db94f019-727d-48df-a297-7007e4133cf6" containerName="init" Nov 28 10:16:32 crc kubenswrapper[4838]: E1128 10:16:32.610903 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35d4d06e-5f03-40aa-9363-17421ac37e64" containerName="mariadb-account-create-update" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.610909 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="35d4d06e-5f03-40aa-9363-17421ac37e64" containerName="mariadb-account-create-update" Nov 28 10:16:32 crc kubenswrapper[4838]: E1128 10:16:32.610919 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a69eff2-209f-4644-95ac-d9490f525525" containerName="mariadb-database-create" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.610925 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a69eff2-209f-4644-95ac-d9490f525525" containerName="mariadb-database-create" Nov 28 10:16:32 crc kubenswrapper[4838]: E1128 10:16:32.610932 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44f940a0-f05c-4d6a-b5d3-310bec612088" containerName="mariadb-account-create-update" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.610938 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="44f940a0-f05c-4d6a-b5d3-310bec612088" containerName="mariadb-account-create-update" Nov 28 10:16:32 crc kubenswrapper[4838]: E1128 10:16:32.610950 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43c88e97-d64f-4155-bd4a-691c588527b2" containerName="mariadb-database-create" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.610956 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="43c88e97-d64f-4155-bd4a-691c588527b2" containerName="mariadb-database-create" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.611099 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="35d4d06e-5f03-40aa-9363-17421ac37e64" containerName="mariadb-account-create-update" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.611107 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="da642c1a-1fea-497a-85af-f966ce5ddaad" containerName="mariadb-account-create-update" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.611114 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="44f940a0-f05c-4d6a-b5d3-310bec612088" containerName="mariadb-account-create-update" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.611122 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="43c88e97-d64f-4155-bd4a-691c588527b2" containerName="mariadb-database-create" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.611135 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a69eff2-209f-4644-95ac-d9490f525525" containerName="mariadb-database-create" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.611143 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="db94f019-727d-48df-a297-7007e4133cf6" containerName="dnsmasq-dns" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.611151 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="47572f0c-e812-4be7-b4de-2a4a1045553f" containerName="mariadb-database-create" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.611162 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c451dac-becb-4d7e-ae6b-4ee9864113b6" 
containerName="keystone-db-sync" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.612957 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6546db6db7-wstkp" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.618697 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-prfd7"] Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.619659 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-prfd7" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.626849 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.627225 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.627416 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.627582 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-2bjp9" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.627771 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.647001 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6546db6db7-wstkp"] Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.661462 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-prfd7"] Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.709545 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-ovsdbserver-nb\") pod \"dnsmasq-dns-6546db6db7-wstkp\" (UID: \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\") " pod="openstack/dnsmasq-dns-6546db6db7-wstkp" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.709630 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8ch6\" (UniqueName: \"kubernetes.io/projected/e086f615-67f3-43ba-8f8f-6c25889eb972-kube-api-access-d8ch6\") pod \"keystone-bootstrap-prfd7\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " pod="openstack/keystone-bootstrap-prfd7" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.709648 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-config\") pod \"dnsmasq-dns-6546db6db7-wstkp\" (UID: \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\") " pod="openstack/dnsmasq-dns-6546db6db7-wstkp" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.709704 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jh42\" (UniqueName: \"kubernetes.io/projected/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-kube-api-access-4jh42\") pod \"dnsmasq-dns-6546db6db7-wstkp\" (UID: \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\") " pod="openstack/dnsmasq-dns-6546db6db7-wstkp" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.709740 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: 
\"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-fernet-keys\") pod \"keystone-bootstrap-prfd7\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " pod="openstack/keystone-bootstrap-prfd7" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.709760 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-credential-keys\") pod \"keystone-bootstrap-prfd7\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " pod="openstack/keystone-bootstrap-prfd7" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.709796 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-combined-ca-bundle\") pod \"keystone-bootstrap-prfd7\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " pod="openstack/keystone-bootstrap-prfd7" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.709822 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-config-data\") pod \"keystone-bootstrap-prfd7\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " pod="openstack/keystone-bootstrap-prfd7" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.709863 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-dns-svc\") pod \"dnsmasq-dns-6546db6db7-wstkp\" (UID: \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\") " pod="openstack/dnsmasq-dns-6546db6db7-wstkp" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.709903 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-ovsdbserver-sb\") pod \"dnsmasq-dns-6546db6db7-wstkp\" (UID: \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\") " pod="openstack/dnsmasq-dns-6546db6db7-wstkp" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.709940 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-scripts\") pod \"keystone-bootstrap-prfd7\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " pod="openstack/keystone-bootstrap-prfd7" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.796480 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-qdl6f"] Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.797666 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-qdl6f" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.805024 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.805149 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-79wbw" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.805366 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.806956 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-qdl6f"] Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.823110 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-scripts\") pod \"keystone-bootstrap-prfd7\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " pod="openstack/keystone-bootstrap-prfd7" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.823195 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-ovsdbserver-nb\") pod \"dnsmasq-dns-6546db6db7-wstkp\" (UID: \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\") " pod="openstack/dnsmasq-dns-6546db6db7-wstkp" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.823269 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d8ch6\" (UniqueName: \"kubernetes.io/projected/e086f615-67f3-43ba-8f8f-6c25889eb972-kube-api-access-d8ch6\") pod \"keystone-bootstrap-prfd7\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " pod="openstack/keystone-bootstrap-prfd7" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.823288 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-config\") pod \"dnsmasq-dns-6546db6db7-wstkp\" (UID: \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\") " pod="openstack/dnsmasq-dns-6546db6db7-wstkp" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.823337 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jh42\" (UniqueName: \"kubernetes.io/projected/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-kube-api-access-4jh42\") pod \"dnsmasq-dns-6546db6db7-wstkp\" (UID: \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\") " pod="openstack/dnsmasq-dns-6546db6db7-wstkp" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.823362 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-fernet-keys\") pod \"keystone-bootstrap-prfd7\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " pod="openstack/keystone-bootstrap-prfd7" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.823391 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-credential-keys\") pod \"keystone-bootstrap-prfd7\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " pod="openstack/keystone-bootstrap-prfd7" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.823425 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-combined-ca-bundle\") pod \"keystone-bootstrap-prfd7\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " pod="openstack/keystone-bootstrap-prfd7" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.823461 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-config-data\") pod \"keystone-bootstrap-prfd7\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " pod="openstack/keystone-bootstrap-prfd7" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.823526 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-dns-svc\") pod \"dnsmasq-dns-6546db6db7-wstkp\" (UID: \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\") " pod="openstack/dnsmasq-dns-6546db6db7-wstkp" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.823580 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-ovsdbserver-sb\") pod \"dnsmasq-dns-6546db6db7-wstkp\" (UID: \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\") " pod="openstack/dnsmasq-dns-6546db6db7-wstkp" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.824417 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-ovsdbserver-sb\") pod \"dnsmasq-dns-6546db6db7-wstkp\" (UID: \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\") " pod="openstack/dnsmasq-dns-6546db6db7-wstkp" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.826548 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.828479 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.831850 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-fernet-keys\") pod \"keystone-bootstrap-prfd7\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " pod="openstack/keystone-bootstrap-prfd7" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.837311 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.837922 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.839218 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-credential-keys\") pod \"keystone-bootstrap-prfd7\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " pod="openstack/keystone-bootstrap-prfd7" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.839763 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-combined-ca-bundle\") pod \"keystone-bootstrap-prfd7\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " pod="openstack/keystone-bootstrap-prfd7" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.844058 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-config\") pod \"dnsmasq-dns-6546db6db7-wstkp\" (UID: \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\") " pod="openstack/dnsmasq-dns-6546db6db7-wstkp" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.847588 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-ovsdbserver-nb\") pod \"dnsmasq-dns-6546db6db7-wstkp\" (UID: \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\") " pod="openstack/dnsmasq-dns-6546db6db7-wstkp" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.860966 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-dns-svc\") pod \"dnsmasq-dns-6546db6db7-wstkp\" (UID: \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\") " pod="openstack/dnsmasq-dns-6546db6db7-wstkp" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.893683 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jh42\" (UniqueName: \"kubernetes.io/projected/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-kube-api-access-4jh42\") pod \"dnsmasq-dns-6546db6db7-wstkp\" (UID: \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\") " pod="openstack/dnsmasq-dns-6546db6db7-wstkp" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.894271 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-config-data\") pod \"keystone-bootstrap-prfd7\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " pod="openstack/keystone-bootstrap-prfd7" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.914166 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-scripts\") pod \"keystone-bootstrap-prfd7\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " pod="openstack/keystone-bootstrap-prfd7" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.919632 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.928533 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " pod="openstack/ceilometer-0" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.928584 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-db-sync-config-data\") pod \"cinder-db-sync-qdl6f\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " pod="openstack/cinder-db-sync-qdl6f" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.928625 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " pod="openstack/ceilometer-0" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.928643 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6bd887d-04f5-45c0-b831-1d16262bbf08-log-httpd\") pod \"ceilometer-0\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " pod="openstack/ceilometer-0" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.928665 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-config-data\") pod \"ceilometer-0\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " pod="openstack/ceilometer-0" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.928701 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jg2s\" (UniqueName: \"kubernetes.io/projected/b6bd887d-04f5-45c0-b831-1d16262bbf08-kube-api-access-9jg2s\") pod \"ceilometer-0\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " pod="openstack/ceilometer-0" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.928748 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrpzs\" (UniqueName: \"kubernetes.io/projected/d4f1cd16-7995-4964-87d8-ab904bc11ca5-kube-api-access-lrpzs\") pod \"cinder-db-sync-qdl6f\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " pod="openstack/cinder-db-sync-qdl6f" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.928788 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d4f1cd16-7995-4964-87d8-ab904bc11ca5-etc-machine-id\") pod \"cinder-db-sync-qdl6f\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " pod="openstack/cinder-db-sync-qdl6f" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.928809 4838 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-scripts\") pod \"cinder-db-sync-qdl6f\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " pod="openstack/cinder-db-sync-qdl6f" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.928838 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-config-data\") pod \"cinder-db-sync-qdl6f\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " pod="openstack/cinder-db-sync-qdl6f" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.929201 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-combined-ca-bundle\") pod \"cinder-db-sync-qdl6f\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " pod="openstack/cinder-db-sync-qdl6f" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.929236 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-scripts\") pod \"ceilometer-0\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " pod="openstack/ceilometer-0" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.929261 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6bd887d-04f5-45c0-b831-1d16262bbf08-run-httpd\") pod \"ceilometer-0\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " pod="openstack/ceilometer-0" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.936303 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6546db6db7-wstkp" Nov 28 10:16:32 crc kubenswrapper[4838]: I1128 10:16:32.992127 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8ch6\" (UniqueName: \"kubernetes.io/projected/e086f615-67f3-43ba-8f8f-6c25889eb972-kube-api-access-d8ch6\") pod \"keystone-bootstrap-prfd7\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " pod="openstack/keystone-bootstrap-prfd7" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.033213 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d4f1cd16-7995-4964-87d8-ab904bc11ca5-etc-machine-id\") pod \"cinder-db-sync-qdl6f\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " pod="openstack/cinder-db-sync-qdl6f" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.033265 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-scripts\") pod \"cinder-db-sync-qdl6f\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " pod="openstack/cinder-db-sync-qdl6f" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.033552 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-config-data\") pod \"cinder-db-sync-qdl6f\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " pod="openstack/cinder-db-sync-qdl6f" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.033655 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-combined-ca-bundle\") pod \"cinder-db-sync-qdl6f\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " pod="openstack/cinder-db-sync-qdl6f" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.033740 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-scripts\") pod \"ceilometer-0\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " pod="openstack/ceilometer-0" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.033791 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6bd887d-04f5-45c0-b831-1d16262bbf08-run-httpd\") pod \"ceilometer-0\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " pod="openstack/ceilometer-0" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.034133 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d4f1cd16-7995-4964-87d8-ab904bc11ca5-etc-machine-id\") pod \"cinder-db-sync-qdl6f\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " pod="openstack/cinder-db-sync-qdl6f" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.034364 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " pod="openstack/ceilometer-0" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.034395 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: 
\"kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-db-sync-config-data\") pod \"cinder-db-sync-qdl6f\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " pod="openstack/cinder-db-sync-qdl6f" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.051213 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6bd887d-04f5-45c0-b831-1d16262bbf08-run-httpd\") pod \"ceilometer-0\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " pod="openstack/ceilometer-0" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.050626 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " pod="openstack/ceilometer-0" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.053093 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6bd887d-04f5-45c0-b831-1d16262bbf08-log-httpd\") pod \"ceilometer-0\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " pod="openstack/ceilometer-0" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.053132 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-config-data\") pod \"ceilometer-0\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " pod="openstack/ceilometer-0" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.053170 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jg2s\" (UniqueName: \"kubernetes.io/projected/b6bd887d-04f5-45c0-b831-1d16262bbf08-kube-api-access-9jg2s\") pod \"ceilometer-0\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " pod="openstack/ceilometer-0" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.055938 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6bd887d-04f5-45c0-b831-1d16262bbf08-log-httpd\") pod \"ceilometer-0\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " pod="openstack/ceilometer-0" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.058313 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-scripts\") pod \"ceilometer-0\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " pod="openstack/ceilometer-0" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.058827 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrpzs\" (UniqueName: \"kubernetes.io/projected/d4f1cd16-7995-4964-87d8-ab904bc11ca5-kube-api-access-lrpzs\") pod \"cinder-db-sync-qdl6f\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " pod="openstack/cinder-db-sync-qdl6f" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.060608 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-scripts\") pod \"cinder-db-sync-qdl6f\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " pod="openstack/cinder-db-sync-qdl6f" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.060648 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-config-data\") pod \"cinder-db-sync-qdl6f\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " pod="openstack/cinder-db-sync-qdl6f" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.061124 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-combined-ca-bundle\") pod \"cinder-db-sync-qdl6f\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " pod="openstack/cinder-db-sync-qdl6f" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.061484 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-db-sync-config-data\") pod \"cinder-db-sync-qdl6f\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " pod="openstack/cinder-db-sync-qdl6f" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.061508 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " pod="openstack/ceilometer-0" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.061523 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-s4dkv"] Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.062198 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " pod="openstack/ceilometer-0" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.062998 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-s4dkv" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.067316 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-2w4fz" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.067491 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.069185 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.078648 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrpzs\" (UniqueName: \"kubernetes.io/projected/d4f1cd16-7995-4964-87d8-ab904bc11ca5-kube-api-access-lrpzs\") pod \"cinder-db-sync-qdl6f\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " pod="openstack/cinder-db-sync-qdl6f" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.082402 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jg2s\" (UniqueName: \"kubernetes.io/projected/b6bd887d-04f5-45c0-b831-1d16262bbf08-kube-api-access-9jg2s\") pod \"ceilometer-0\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " pod="openstack/ceilometer-0" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.082631 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-config-data\") pod \"ceilometer-0\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " pod="openstack/ceilometer-0" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.083592 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-s4dkv"] Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.092881 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-8ttzd"] Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.093860 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-8ttzd" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.096473 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.096702 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-wp2h4" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.110207 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-8ttzd"] Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.118385 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-jg6k6"] Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.119312 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-jg6k6" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.121578 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-wp5vt" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.121907 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.122289 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.124429 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-qdl6f" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.135159 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6546db6db7-wstkp"] Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.145528 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-jg6k6"] Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.160120 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7987f74bbc-pkpc9"] Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.161345 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.169812 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1f0e700a-b642-4062-94fd-dec398ba7a22-config\") pod \"neutron-db-sync-s4dkv\" (UID: \"1f0e700a-b642-4062-94fd-dec398ba7a22\") " pod="openstack/neutron-db-sync-s4dkv" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.169877 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57482c21-bcdb-4a48-93ed-41ddca82a9fb-logs\") pod \"placement-db-sync-jg6k6\" (UID: \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\") " pod="openstack/placement-db-sync-jg6k6" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.169897 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/435894c3-ccee-4f57-8afa-d225888db755-db-sync-config-data\") pod \"barbican-db-sync-8ttzd\" (UID: \"435894c3-ccee-4f57-8afa-d225888db755\") " pod="openstack/barbican-db-sync-8ttzd" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.169914 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2x9nc\" (UniqueName: \"kubernetes.io/projected/57482c21-bcdb-4a48-93ed-41ddca82a9fb-kube-api-access-2x9nc\") pod \"placement-db-sync-jg6k6\" (UID: \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\") " pod="openstack/placement-db-sync-jg6k6" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.169983 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4mbxq\" (UniqueName: \"kubernetes.io/projected/435894c3-ccee-4f57-8afa-d225888db755-kube-api-access-4mbxq\") pod \"barbican-db-sync-8ttzd\" (UID: \"435894c3-ccee-4f57-8afa-d225888db755\") " pod="openstack/barbican-db-sync-8ttzd" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.170064 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57482c21-bcdb-4a48-93ed-41ddca82a9fb-scripts\") pod \"placement-db-sync-jg6k6\" (UID: \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\") " pod="openstack/placement-db-sync-jg6k6" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.170137 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57482c21-bcdb-4a48-93ed-41ddca82a9fb-combined-ca-bundle\") pod \"placement-db-sync-jg6k6\" (UID: \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\") " pod="openstack/placement-db-sync-jg6k6" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.170239 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/435894c3-ccee-4f57-8afa-d225888db755-combined-ca-bundle\") pod \"barbican-db-sync-8ttzd\" (UID: \"435894c3-ccee-4f57-8afa-d225888db755\") " pod="openstack/barbican-db-sync-8ttzd" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.170271 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57482c21-bcdb-4a48-93ed-41ddca82a9fb-config-data\") pod \"placement-db-sync-jg6k6\" (UID: \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\") " pod="openstack/placement-db-sync-jg6k6" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.170489 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q94wj\" (UniqueName: \"kubernetes.io/projected/1f0e700a-b642-4062-94fd-dec398ba7a22-kube-api-access-q94wj\") pod \"neutron-db-sync-s4dkv\" (UID: \"1f0e700a-b642-4062-94fd-dec398ba7a22\") " pod="openstack/neutron-db-sync-s4dkv" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.170530 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f0e700a-b642-4062-94fd-dec398ba7a22-combined-ca-bundle\") pod \"neutron-db-sync-s4dkv\" (UID: \"1f0e700a-b642-4062-94fd-dec398ba7a22\") " pod="openstack/neutron-db-sync-s4dkv" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.174761 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7987f74bbc-pkpc9"] Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.238093 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.247829 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-prfd7" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.272001 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4mbxq\" (UniqueName: \"kubernetes.io/projected/435894c3-ccee-4f57-8afa-d225888db755-kube-api-access-4mbxq\") pod \"barbican-db-sync-8ttzd\" (UID: \"435894c3-ccee-4f57-8afa-d225888db755\") " pod="openstack/barbican-db-sync-8ttzd" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.272045 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57482c21-bcdb-4a48-93ed-41ddca82a9fb-scripts\") pod \"placement-db-sync-jg6k6\" (UID: \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\") " pod="openstack/placement-db-sync-jg6k6" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.272076 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57482c21-bcdb-4a48-93ed-41ddca82a9fb-combined-ca-bundle\") pod \"placement-db-sync-jg6k6\" (UID: \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\") " pod="openstack/placement-db-sync-jg6k6" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.273159 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/435894c3-ccee-4f57-8afa-d225888db755-combined-ca-bundle\") pod \"barbican-db-sync-8ttzd\" (UID: \"435894c3-ccee-4f57-8afa-d225888db755\") " pod="openstack/barbican-db-sync-8ttzd" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.273194 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57482c21-bcdb-4a48-93ed-41ddca82a9fb-config-data\") pod \"placement-db-sync-jg6k6\" (UID: \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\") " pod="openstack/placement-db-sync-jg6k6" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.273242 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q94wj\" (UniqueName: \"kubernetes.io/projected/1f0e700a-b642-4062-94fd-dec398ba7a22-kube-api-access-q94wj\") pod \"neutron-db-sync-s4dkv\" (UID: \"1f0e700a-b642-4062-94fd-dec398ba7a22\") " pod="openstack/neutron-db-sync-s4dkv" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.273269 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f0e700a-b642-4062-94fd-dec398ba7a22-combined-ca-bundle\") pod \"neutron-db-sync-s4dkv\" (UID: \"1f0e700a-b642-4062-94fd-dec398ba7a22\") " pod="openstack/neutron-db-sync-s4dkv" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.273366 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-ovsdbserver-nb\") pod \"dnsmasq-dns-7987f74bbc-pkpc9\" (UID: \"1b11db77-4c89-43b5-b2c4-f72e865025b3\") " pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.273408 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1f0e700a-b642-4062-94fd-dec398ba7a22-config\") pod \"neutron-db-sync-s4dkv\" (UID: \"1f0e700a-b642-4062-94fd-dec398ba7a22\") " pod="openstack/neutron-db-sync-s4dkv" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.273427 
4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hz499\" (UniqueName: \"kubernetes.io/projected/1b11db77-4c89-43b5-b2c4-f72e865025b3-kube-api-access-hz499\") pod \"dnsmasq-dns-7987f74bbc-pkpc9\" (UID: \"1b11db77-4c89-43b5-b2c4-f72e865025b3\") " pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.273448 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-config\") pod \"dnsmasq-dns-7987f74bbc-pkpc9\" (UID: \"1b11db77-4c89-43b5-b2c4-f72e865025b3\") " pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.273502 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-dns-svc\") pod \"dnsmasq-dns-7987f74bbc-pkpc9\" (UID: \"1b11db77-4c89-43b5-b2c4-f72e865025b3\") " pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.273553 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57482c21-bcdb-4a48-93ed-41ddca82a9fb-logs\") pod \"placement-db-sync-jg6k6\" (UID: \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\") " pod="openstack/placement-db-sync-jg6k6" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.273575 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/435894c3-ccee-4f57-8afa-d225888db755-db-sync-config-data\") pod \"barbican-db-sync-8ttzd\" (UID: \"435894c3-ccee-4f57-8afa-d225888db755\") " pod="openstack/barbican-db-sync-8ttzd" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.273594 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2x9nc\" (UniqueName: \"kubernetes.io/projected/57482c21-bcdb-4a48-93ed-41ddca82a9fb-kube-api-access-2x9nc\") pod \"placement-db-sync-jg6k6\" (UID: \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\") " pod="openstack/placement-db-sync-jg6k6" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.273612 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-ovsdbserver-sb\") pod \"dnsmasq-dns-7987f74bbc-pkpc9\" (UID: \"1b11db77-4c89-43b5-b2c4-f72e865025b3\") " pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.274485 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57482c21-bcdb-4a48-93ed-41ddca82a9fb-logs\") pod \"placement-db-sync-jg6k6\" (UID: \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\") " pod="openstack/placement-db-sync-jg6k6" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.277809 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/435894c3-ccee-4f57-8afa-d225888db755-combined-ca-bundle\") pod \"barbican-db-sync-8ttzd\" (UID: \"435894c3-ccee-4f57-8afa-d225888db755\") " pod="openstack/barbican-db-sync-8ttzd" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.279190 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/435894c3-ccee-4f57-8afa-d225888db755-db-sync-config-data\") pod \"barbican-db-sync-8ttzd\" (UID: \"435894c3-ccee-4f57-8afa-d225888db755\") " pod="openstack/barbican-db-sync-8ttzd" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.279672 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/1f0e700a-b642-4062-94fd-dec398ba7a22-config\") pod \"neutron-db-sync-s4dkv\" (UID: \"1f0e700a-b642-4062-94fd-dec398ba7a22\") " pod="openstack/neutron-db-sync-s4dkv" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.281439 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57482c21-bcdb-4a48-93ed-41ddca82a9fb-config-data\") pod \"placement-db-sync-jg6k6\" (UID: \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\") " pod="openstack/placement-db-sync-jg6k6" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.281567 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f0e700a-b642-4062-94fd-dec398ba7a22-combined-ca-bundle\") pod \"neutron-db-sync-s4dkv\" (UID: \"1f0e700a-b642-4062-94fd-dec398ba7a22\") " pod="openstack/neutron-db-sync-s4dkv" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.287842 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57482c21-bcdb-4a48-93ed-41ddca82a9fb-combined-ca-bundle\") pod \"placement-db-sync-jg6k6\" (UID: \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\") " pod="openstack/placement-db-sync-jg6k6" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.288082 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57482c21-bcdb-4a48-93ed-41ddca82a9fb-scripts\") pod \"placement-db-sync-jg6k6\" (UID: \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\") " pod="openstack/placement-db-sync-jg6k6" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.289949 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4mbxq\" (UniqueName: \"kubernetes.io/projected/435894c3-ccee-4f57-8afa-d225888db755-kube-api-access-4mbxq\") pod \"barbican-db-sync-8ttzd\" (UID: \"435894c3-ccee-4f57-8afa-d225888db755\") " pod="openstack/barbican-db-sync-8ttzd" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.292153 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q94wj\" (UniqueName: \"kubernetes.io/projected/1f0e700a-b642-4062-94fd-dec398ba7a22-kube-api-access-q94wj\") pod \"neutron-db-sync-s4dkv\" (UID: \"1f0e700a-b642-4062-94fd-dec398ba7a22\") " pod="openstack/neutron-db-sync-s4dkv" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.296527 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2x9nc\" (UniqueName: \"kubernetes.io/projected/57482c21-bcdb-4a48-93ed-41ddca82a9fb-kube-api-access-2x9nc\") pod \"placement-db-sync-jg6k6\" (UID: \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\") " pod="openstack/placement-db-sync-jg6k6" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.377467 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-ovsdbserver-nb\") pod \"dnsmasq-dns-7987f74bbc-pkpc9\" (UID: \"1b11db77-4c89-43b5-b2c4-f72e865025b3\") " 
pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.377518 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hz499\" (UniqueName: \"kubernetes.io/projected/1b11db77-4c89-43b5-b2c4-f72e865025b3-kube-api-access-hz499\") pod \"dnsmasq-dns-7987f74bbc-pkpc9\" (UID: \"1b11db77-4c89-43b5-b2c4-f72e865025b3\") " pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.377541 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-config\") pod \"dnsmasq-dns-7987f74bbc-pkpc9\" (UID: \"1b11db77-4c89-43b5-b2c4-f72e865025b3\") " pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.377568 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-dns-svc\") pod \"dnsmasq-dns-7987f74bbc-pkpc9\" (UID: \"1b11db77-4c89-43b5-b2c4-f72e865025b3\") " pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.377605 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-ovsdbserver-sb\") pod \"dnsmasq-dns-7987f74bbc-pkpc9\" (UID: \"1b11db77-4c89-43b5-b2c4-f72e865025b3\") " pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.378541 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-ovsdbserver-sb\") pod \"dnsmasq-dns-7987f74bbc-pkpc9\" (UID: \"1b11db77-4c89-43b5-b2c4-f72e865025b3\") " pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.379145 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-s4dkv" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.379195 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-config\") pod \"dnsmasq-dns-7987f74bbc-pkpc9\" (UID: \"1b11db77-4c89-43b5-b2c4-f72e865025b3\") " pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.379491 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-ovsdbserver-nb\") pod \"dnsmasq-dns-7987f74bbc-pkpc9\" (UID: \"1b11db77-4c89-43b5-b2c4-f72e865025b3\") " pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.381154 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-dns-svc\") pod \"dnsmasq-dns-7987f74bbc-pkpc9\" (UID: \"1b11db77-4c89-43b5-b2c4-f72e865025b3\") " pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.394628 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hz499\" (UniqueName: \"kubernetes.io/projected/1b11db77-4c89-43b5-b2c4-f72e865025b3-kube-api-access-hz499\") pod \"dnsmasq-dns-7987f74bbc-pkpc9\" (UID: \"1b11db77-4c89-43b5-b2c4-f72e865025b3\") " pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.410281 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-8ttzd" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.432475 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-jg6k6" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.475638 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.522254 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6546db6db7-wstkp"] Nov 28 10:16:33 crc kubenswrapper[4838]: W1128 10:16:33.545948 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9b97c7c8_0036_4f7d_a899_74a0bc1c8206.slice/crio-f3a7694842b6393e3df01e3eeef6e62bfc332895bc22876e46902399c77b69e7 WatchSource:0}: Error finding container f3a7694842b6393e3df01e3eeef6e62bfc332895bc22876e46902399c77b69e7: Status 404 returned error can't find the container with id f3a7694842b6393e3df01e3eeef6e62bfc332895bc22876e46902399c77b69e7 Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.688089 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-qdl6f"] Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.692893 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.750817 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-prfd7"] Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.960154 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-8ttzd"] Nov 28 10:16:33 crc kubenswrapper[4838]: I1128 10:16:33.971079 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-s4dkv"] Nov 28 10:16:33 crc kubenswrapper[4838]: W1128 10:16:33.974330 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod435894c3_ccee_4f57_8afa_d225888db755.slice/crio-6f0d0bb1e7e26960895ad2653cf0b366d590e05db91ccc384b44dfb8d1c34e1a WatchSource:0}: Error finding container 6f0d0bb1e7e26960895ad2653cf0b366d590e05db91ccc384b44dfb8d1c34e1a: Status 404 returned error can't find the container with id 6f0d0bb1e7e26960895ad2653cf0b366d590e05db91ccc384b44dfb8d1c34e1a Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.104582 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7987f74bbc-pkpc9"] Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.123750 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-jg6k6"] Nov 28 10:16:34 crc kubenswrapper[4838]: W1128 10:16:34.132026 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod57482c21_bcdb_4a48_93ed_41ddca82a9fb.slice/crio-1d788379d848116f49a27f5aafdc8c8a4eadab232c3220b694aab1a5967beea6 WatchSource:0}: Error finding container 1d788379d848116f49a27f5aafdc8c8a4eadab232c3220b694aab1a5967beea6: Status 404 returned error can't find the container with id 1d788379d848116f49a27f5aafdc8c8a4eadab232c3220b694aab1a5967beea6 Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.330680 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-8ttzd" event={"ID":"435894c3-ccee-4f57-8afa-d225888db755","Type":"ContainerStarted","Data":"6f0d0bb1e7e26960895ad2653cf0b366d590e05db91ccc384b44dfb8d1c34e1a"} Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.332429 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-prfd7" 
event={"ID":"e086f615-67f3-43ba-8f8f-6c25889eb972","Type":"ContainerStarted","Data":"7d3e99aa3c380850369bca076ddc18dc078bc00cc5df9176c58055ecb3805926"} Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.332449 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-prfd7" event={"ID":"e086f615-67f3-43ba-8f8f-6c25889eb972","Type":"ContainerStarted","Data":"d98c2fd316ee6aba032fbeac7118a01300ed287818f625586e1681fb26dc9a0b"} Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.333538 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6bd887d-04f5-45c0-b831-1d16262bbf08","Type":"ContainerStarted","Data":"de9499d5b3f9216845c1ce6207a026bac8dfba2c9bcb91980ec5e7ff1ecd636e"} Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.334892 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-s4dkv" event={"ID":"1f0e700a-b642-4062-94fd-dec398ba7a22","Type":"ContainerStarted","Data":"98b9dd4ba1006b6494657179047d568cd92be833abeccc5dd03f5ecbd3ccdc02"} Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.334919 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-s4dkv" event={"ID":"1f0e700a-b642-4062-94fd-dec398ba7a22","Type":"ContainerStarted","Data":"83cc1404a9b000db548432d104618a6e8b30bb3a937ad688bf5b66866e2e2f2c"} Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.336163 4838 generic.go:334] "Generic (PLEG): container finished" podID="9b97c7c8-0036-4f7d-a899-74a0bc1c8206" containerID="45fa4cffa521128e6f416fc69c15dc5cb470ae92ecbfb97ca8e5e569fbe0c700" exitCode=0 Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.336237 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6546db6db7-wstkp" event={"ID":"9b97c7c8-0036-4f7d-a899-74a0bc1c8206","Type":"ContainerDied","Data":"45fa4cffa521128e6f416fc69c15dc5cb470ae92ecbfb97ca8e5e569fbe0c700"} Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.336254 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6546db6db7-wstkp" event={"ID":"9b97c7c8-0036-4f7d-a899-74a0bc1c8206","Type":"ContainerStarted","Data":"f3a7694842b6393e3df01e3eeef6e62bfc332895bc22876e46902399c77b69e7"} Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.339910 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-qdl6f" event={"ID":"d4f1cd16-7995-4964-87d8-ab904bc11ca5","Type":"ContainerStarted","Data":"5515aa05d29520621f947285f4b535b4f6ff45921497fd8217fec3616af4d600"} Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.341030 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-jg6k6" event={"ID":"57482c21-bcdb-4a48-93ed-41ddca82a9fb","Type":"ContainerStarted","Data":"1d788379d848116f49a27f5aafdc8c8a4eadab232c3220b694aab1a5967beea6"} Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.342084 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" event={"ID":"1b11db77-4c89-43b5-b2c4-f72e865025b3","Type":"ContainerStarted","Data":"64552d8fbe3d2a4c842f6482779271b161e224706b0f2dc959b687afa128fcac"} Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.342107 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" event={"ID":"1b11db77-4c89-43b5-b2c4-f72e865025b3","Type":"ContainerStarted","Data":"353218c1f4abafdf6b075d63fa04ada9a7c8dad93e4081286b26f726e24dcbbb"} Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 
10:16:34.362907 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-prfd7" podStartSLOduration=2.362880104 podStartE2EDuration="2.362880104s" podCreationTimestamp="2025-11-28 10:16:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:16:34.349540257 +0000 UTC m=+1166.048514427" watchObservedRunningTime="2025-11-28 10:16:34.362880104 +0000 UTC m=+1166.061854294" Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.404431 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-s4dkv" podStartSLOduration=2.404412895 podStartE2EDuration="2.404412895s" podCreationTimestamp="2025-11-28 10:16:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:16:34.402775951 +0000 UTC m=+1166.101750121" watchObservedRunningTime="2025-11-28 10:16:34.404412895 +0000 UTC m=+1166.103387065" Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.678574 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6546db6db7-wstkp" Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.747237 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.821172 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-ovsdbserver-sb\") pod \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\" (UID: \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\") " Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.821391 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-ovsdbserver-nb\") pod \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\" (UID: \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\") " Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.821985 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-dns-svc\") pod \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\" (UID: \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\") " Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.822006 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-config\") pod \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\" (UID: \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\") " Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.822256 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4jh42\" (UniqueName: \"kubernetes.io/projected/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-kube-api-access-4jh42\") pod \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\" (UID: \"9b97c7c8-0036-4f7d-a899-74a0bc1c8206\") " Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.835042 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-kube-api-access-4jh42" (OuterVolumeSpecName: "kube-api-access-4jh42") pod "9b97c7c8-0036-4f7d-a899-74a0bc1c8206" (UID: "9b97c7c8-0036-4f7d-a899-74a0bc1c8206"). 
InnerVolumeSpecName "kube-api-access-4jh42". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.846240 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9b97c7c8-0036-4f7d-a899-74a0bc1c8206" (UID: "9b97c7c8-0036-4f7d-a899-74a0bc1c8206"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.849220 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9b97c7c8-0036-4f7d-a899-74a0bc1c8206" (UID: "9b97c7c8-0036-4f7d-a899-74a0bc1c8206"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.851960 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-config" (OuterVolumeSpecName: "config") pod "9b97c7c8-0036-4f7d-a899-74a0bc1c8206" (UID: "9b97c7c8-0036-4f7d-a899-74a0bc1c8206"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.866272 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9b97c7c8-0036-4f7d-a899-74a0bc1c8206" (UID: "9b97c7c8-0036-4f7d-a899-74a0bc1c8206"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.925832 4838 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.925876 4838 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.925887 4838 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.925899 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:34 crc kubenswrapper[4838]: I1128 10:16:34.925908 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4jh42\" (UniqueName: \"kubernetes.io/projected/9b97c7c8-0036-4f7d-a899-74a0bc1c8206-kube-api-access-4jh42\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:35 crc kubenswrapper[4838]: I1128 10:16:35.356636 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6546db6db7-wstkp" event={"ID":"9b97c7c8-0036-4f7d-a899-74a0bc1c8206","Type":"ContainerDied","Data":"f3a7694842b6393e3df01e3eeef6e62bfc332895bc22876e46902399c77b69e7"} Nov 28 10:16:35 crc kubenswrapper[4838]: I1128 10:16:35.356656 4838 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6546db6db7-wstkp" Nov 28 10:16:35 crc kubenswrapper[4838]: I1128 10:16:35.356701 4838 scope.go:117] "RemoveContainer" containerID="45fa4cffa521128e6f416fc69c15dc5cb470ae92ecbfb97ca8e5e569fbe0c700" Nov 28 10:16:35 crc kubenswrapper[4838]: I1128 10:16:35.359913 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" event={"ID":"1b11db77-4c89-43b5-b2c4-f72e865025b3","Type":"ContainerDied","Data":"64552d8fbe3d2a4c842f6482779271b161e224706b0f2dc959b687afa128fcac"} Nov 28 10:16:35 crc kubenswrapper[4838]: I1128 10:16:35.360814 4838 generic.go:334] "Generic (PLEG): container finished" podID="1b11db77-4c89-43b5-b2c4-f72e865025b3" containerID="64552d8fbe3d2a4c842f6482779271b161e224706b0f2dc959b687afa128fcac" exitCode=0 Nov 28 10:16:35 crc kubenswrapper[4838]: I1128 10:16:35.500937 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6546db6db7-wstkp"] Nov 28 10:16:35 crc kubenswrapper[4838]: I1128 10:16:35.509804 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6546db6db7-wstkp"] Nov 28 10:16:36 crc kubenswrapper[4838]: I1128 10:16:36.375548 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" event={"ID":"1b11db77-4c89-43b5-b2c4-f72e865025b3","Type":"ContainerStarted","Data":"8de83020bee197333d0589834e31bc771f5f9c952b02e750d44ce828a4a1f3c7"} Nov 28 10:16:36 crc kubenswrapper[4838]: I1128 10:16:36.375866 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" Nov 28 10:16:36 crc kubenswrapper[4838]: I1128 10:16:36.397239 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" podStartSLOduration=3.397224568 podStartE2EDuration="3.397224568s" podCreationTimestamp="2025-11-28 10:16:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:16:36.393229191 +0000 UTC m=+1168.092203371" watchObservedRunningTime="2025-11-28 10:16:36.397224568 +0000 UTC m=+1168.096198738" Nov 28 10:16:36 crc kubenswrapper[4838]: I1128 10:16:36.576025 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b97c7c8-0036-4f7d-a899-74a0bc1c8206" path="/var/lib/kubelet/pods/9b97c7c8-0036-4f7d-a899-74a0bc1c8206/volumes" Nov 28 10:16:37 crc kubenswrapper[4838]: I1128 10:16:37.397076 4838 generic.go:334] "Generic (PLEG): container finished" podID="e086f615-67f3-43ba-8f8f-6c25889eb972" containerID="7d3e99aa3c380850369bca076ddc18dc078bc00cc5df9176c58055ecb3805926" exitCode=0 Nov 28 10:16:37 crc kubenswrapper[4838]: I1128 10:16:37.397160 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-prfd7" event={"ID":"e086f615-67f3-43ba-8f8f-6c25889eb972","Type":"ContainerDied","Data":"7d3e99aa3c380850369bca076ddc18dc078bc00cc5df9176c58055ecb3805926"} Nov 28 10:16:40 crc kubenswrapper[4838]: I1128 10:16:40.689171 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-prfd7" Nov 28 10:16:40 crc kubenswrapper[4838]: I1128 10:16:40.741522 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-scripts\") pod \"e086f615-67f3-43ba-8f8f-6c25889eb972\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " Nov 28 10:16:40 crc kubenswrapper[4838]: I1128 10:16:40.741642 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-config-data\") pod \"e086f615-67f3-43ba-8f8f-6c25889eb972\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " Nov 28 10:16:40 crc kubenswrapper[4838]: I1128 10:16:40.742439 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-credential-keys\") pod \"e086f615-67f3-43ba-8f8f-6c25889eb972\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " Nov 28 10:16:40 crc kubenswrapper[4838]: I1128 10:16:40.742514 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d8ch6\" (UniqueName: \"kubernetes.io/projected/e086f615-67f3-43ba-8f8f-6c25889eb972-kube-api-access-d8ch6\") pod \"e086f615-67f3-43ba-8f8f-6c25889eb972\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " Nov 28 10:16:40 crc kubenswrapper[4838]: I1128 10:16:40.742591 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-fernet-keys\") pod \"e086f615-67f3-43ba-8f8f-6c25889eb972\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " Nov 28 10:16:40 crc kubenswrapper[4838]: I1128 10:16:40.742742 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-combined-ca-bundle\") pod \"e086f615-67f3-43ba-8f8f-6c25889eb972\" (UID: \"e086f615-67f3-43ba-8f8f-6c25889eb972\") " Nov 28 10:16:40 crc kubenswrapper[4838]: I1128 10:16:40.767995 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-scripts" (OuterVolumeSpecName: "scripts") pod "e086f615-67f3-43ba-8f8f-6c25889eb972" (UID: "e086f615-67f3-43ba-8f8f-6c25889eb972"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:16:40 crc kubenswrapper[4838]: I1128 10:16:40.769619 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "e086f615-67f3-43ba-8f8f-6c25889eb972" (UID: "e086f615-67f3-43ba-8f8f-6c25889eb972"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:16:40 crc kubenswrapper[4838]: I1128 10:16:40.769818 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e086f615-67f3-43ba-8f8f-6c25889eb972-kube-api-access-d8ch6" (OuterVolumeSpecName: "kube-api-access-d8ch6") pod "e086f615-67f3-43ba-8f8f-6c25889eb972" (UID: "e086f615-67f3-43ba-8f8f-6c25889eb972"). InnerVolumeSpecName "kube-api-access-d8ch6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:16:40 crc kubenswrapper[4838]: I1128 10:16:40.769925 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "e086f615-67f3-43ba-8f8f-6c25889eb972" (UID: "e086f615-67f3-43ba-8f8f-6c25889eb972"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:16:40 crc kubenswrapper[4838]: I1128 10:16:40.781352 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-config-data" (OuterVolumeSpecName: "config-data") pod "e086f615-67f3-43ba-8f8f-6c25889eb972" (UID: "e086f615-67f3-43ba-8f8f-6c25889eb972"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:16:40 crc kubenswrapper[4838]: I1128 10:16:40.789170 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e086f615-67f3-43ba-8f8f-6c25889eb972" (UID: "e086f615-67f3-43ba-8f8f-6c25889eb972"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:16:40 crc kubenswrapper[4838]: I1128 10:16:40.845645 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:40 crc kubenswrapper[4838]: I1128 10:16:40.845691 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:40 crc kubenswrapper[4838]: I1128 10:16:40.845704 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:40 crc kubenswrapper[4838]: I1128 10:16:40.845745 4838 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:40 crc kubenswrapper[4838]: I1128 10:16:40.845760 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d8ch6\" (UniqueName: \"kubernetes.io/projected/e086f615-67f3-43ba-8f8f-6c25889eb972-kube-api-access-d8ch6\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:40 crc kubenswrapper[4838]: I1128 10:16:40.845774 4838 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e086f615-67f3-43ba-8f8f-6c25889eb972-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.428791 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-prfd7" event={"ID":"e086f615-67f3-43ba-8f8f-6c25889eb972","Type":"ContainerDied","Data":"d98c2fd316ee6aba032fbeac7118a01300ed287818f625586e1681fb26dc9a0b"} Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.428838 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d98c2fd316ee6aba032fbeac7118a01300ed287818f625586e1681fb26dc9a0b" Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.428916 4838 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-prfd7" Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.787594 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-prfd7"] Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.796279 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-prfd7"] Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.871429 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-xhqld"] Nov 28 10:16:41 crc kubenswrapper[4838]: E1128 10:16:41.871883 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b97c7c8-0036-4f7d-a899-74a0bc1c8206" containerName="init" Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.871896 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b97c7c8-0036-4f7d-a899-74a0bc1c8206" containerName="init" Nov 28 10:16:41 crc kubenswrapper[4838]: E1128 10:16:41.871909 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e086f615-67f3-43ba-8f8f-6c25889eb972" containerName="keystone-bootstrap" Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.871916 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="e086f615-67f3-43ba-8f8f-6c25889eb972" containerName="keystone-bootstrap" Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.872085 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b97c7c8-0036-4f7d-a899-74a0bc1c8206" containerName="init" Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.872099 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="e086f615-67f3-43ba-8f8f-6c25889eb972" containerName="keystone-bootstrap" Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.872657 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-xhqld" Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.877916 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.877916 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.878002 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-2bjp9" Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.878153 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.878164 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.882610 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-xhqld"] Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.964700 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-config-data\") pod \"keystone-bootstrap-xhqld\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " pod="openstack/keystone-bootstrap-xhqld" Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.964847 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-credential-keys\") pod \"keystone-bootstrap-xhqld\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " pod="openstack/keystone-bootstrap-xhqld" Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.964899 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-scripts\") pod \"keystone-bootstrap-xhqld\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " pod="openstack/keystone-bootstrap-xhqld" Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.964996 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-combined-ca-bundle\") pod \"keystone-bootstrap-xhqld\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " pod="openstack/keystone-bootstrap-xhqld" Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.965025 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7z92m\" (UniqueName: \"kubernetes.io/projected/acd4b461-b9cd-4f4b-89e9-8f4e46112938-kube-api-access-7z92m\") pod \"keystone-bootstrap-xhqld\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " pod="openstack/keystone-bootstrap-xhqld" Nov 28 10:16:41 crc kubenswrapper[4838]: I1128 10:16:41.965074 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-fernet-keys\") pod \"keystone-bootstrap-xhqld\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " pod="openstack/keystone-bootstrap-xhqld" Nov 28 10:16:42 crc kubenswrapper[4838]: I1128 10:16:42.067266 4838 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-combined-ca-bundle\") pod \"keystone-bootstrap-xhqld\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " pod="openstack/keystone-bootstrap-xhqld" Nov 28 10:16:42 crc kubenswrapper[4838]: I1128 10:16:42.067311 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7z92m\" (UniqueName: \"kubernetes.io/projected/acd4b461-b9cd-4f4b-89e9-8f4e46112938-kube-api-access-7z92m\") pod \"keystone-bootstrap-xhqld\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " pod="openstack/keystone-bootstrap-xhqld" Nov 28 10:16:42 crc kubenswrapper[4838]: I1128 10:16:42.067330 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-fernet-keys\") pod \"keystone-bootstrap-xhqld\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " pod="openstack/keystone-bootstrap-xhqld" Nov 28 10:16:42 crc kubenswrapper[4838]: I1128 10:16:42.067414 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-config-data\") pod \"keystone-bootstrap-xhqld\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " pod="openstack/keystone-bootstrap-xhqld" Nov 28 10:16:42 crc kubenswrapper[4838]: I1128 10:16:42.067471 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-credential-keys\") pod \"keystone-bootstrap-xhqld\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " pod="openstack/keystone-bootstrap-xhqld" Nov 28 10:16:42 crc kubenswrapper[4838]: I1128 10:16:42.067487 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-scripts\") pod \"keystone-bootstrap-xhqld\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " pod="openstack/keystone-bootstrap-xhqld" Nov 28 10:16:42 crc kubenswrapper[4838]: I1128 10:16:42.072464 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-credential-keys\") pod \"keystone-bootstrap-xhqld\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " pod="openstack/keystone-bootstrap-xhqld" Nov 28 10:16:42 crc kubenswrapper[4838]: I1128 10:16:42.074572 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-config-data\") pod \"keystone-bootstrap-xhqld\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " pod="openstack/keystone-bootstrap-xhqld" Nov 28 10:16:42 crc kubenswrapper[4838]: I1128 10:16:42.075133 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-combined-ca-bundle\") pod \"keystone-bootstrap-xhqld\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " pod="openstack/keystone-bootstrap-xhqld" Nov 28 10:16:42 crc kubenswrapper[4838]: I1128 10:16:42.081310 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-fernet-keys\") pod \"keystone-bootstrap-xhqld\" (UID: 
\"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " pod="openstack/keystone-bootstrap-xhqld" Nov 28 10:16:42 crc kubenswrapper[4838]: I1128 10:16:42.089621 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-scripts\") pod \"keystone-bootstrap-xhqld\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " pod="openstack/keystone-bootstrap-xhqld" Nov 28 10:16:42 crc kubenswrapper[4838]: I1128 10:16:42.094735 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7z92m\" (UniqueName: \"kubernetes.io/projected/acd4b461-b9cd-4f4b-89e9-8f4e46112938-kube-api-access-7z92m\") pod \"keystone-bootstrap-xhqld\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " pod="openstack/keystone-bootstrap-xhqld" Nov 28 10:16:42 crc kubenswrapper[4838]: I1128 10:16:42.197250 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-xhqld" Nov 28 10:16:42 crc kubenswrapper[4838]: I1128 10:16:42.575503 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e086f615-67f3-43ba-8f8f-6c25889eb972" path="/var/lib/kubelet/pods/e086f615-67f3-43ba-8f8f-6c25889eb972/volumes" Nov 28 10:16:43 crc kubenswrapper[4838]: I1128 10:16:43.477948 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" Nov 28 10:16:43 crc kubenswrapper[4838]: I1128 10:16:43.542572 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54f9b7b8d9-kxcgn"] Nov 28 10:16:43 crc kubenswrapper[4838]: I1128 10:16:43.543978 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" podUID="2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2" containerName="dnsmasq-dns" containerID="cri-o://f50eef4ef7b6d2f9da4b3f4a982f5900bae1db40cf0ac32340b73745f9f95797" gracePeriod=10 Nov 28 10:16:44 crc kubenswrapper[4838]: I1128 10:16:44.466120 4838 generic.go:334] "Generic (PLEG): container finished" podID="2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2" containerID="f50eef4ef7b6d2f9da4b3f4a982f5900bae1db40cf0ac32340b73745f9f95797" exitCode=0 Nov 28 10:16:44 crc kubenswrapper[4838]: I1128 10:16:44.466162 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" event={"ID":"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2","Type":"ContainerDied","Data":"f50eef4ef7b6d2f9da4b3f4a982f5900bae1db40cf0ac32340b73745f9f95797"} Nov 28 10:16:47 crc kubenswrapper[4838]: I1128 10:16:47.934870 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" podUID="2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.122:5353: connect: connection refused" Nov 28 10:16:52 crc kubenswrapper[4838]: I1128 10:16:52.935138 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" podUID="2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.122:5353: connect: connection refused" Nov 28 10:16:55 crc kubenswrapper[4838]: E1128 10:16:55.499894 4838 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 28 10:16:55 crc kubenswrapper[4838]: E1128 10:16:55.500771 4838 kuberuntime_manager.go:1274] 
"Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lrpzs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-qdl6f_openstack(d4f1cd16-7995-4964-87d8-ab904bc11ca5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 10:16:55 crc kubenswrapper[4838]: E1128 10:16:55.502193 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-qdl6f" podUID="d4f1cd16-7995-4964-87d8-ab904bc11ca5" Nov 28 10:16:55 crc kubenswrapper[4838]: I1128 10:16:55.600683 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" event={"ID":"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2","Type":"ContainerDied","Data":"9e58bb5fa67f7adde39e1d74c14ca5c6d0d892929313f03becb784f251e5605b"} Nov 28 10:16:55 crc kubenswrapper[4838]: I1128 10:16:55.600729 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9e58bb5fa67f7adde39e1d74c14ca5c6d0d892929313f03becb784f251e5605b" Nov 28 10:16:55 crc kubenswrapper[4838]: E1128 10:16:55.603055 4838 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-qdl6f" podUID="d4f1cd16-7995-4964-87d8-ab904bc11ca5" Nov 28 10:16:55 crc kubenswrapper[4838]: I1128 10:16:55.643885 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" Nov 28 10:16:55 crc kubenswrapper[4838]: I1128 10:16:55.723499 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2mm7\" (UniqueName: \"kubernetes.io/projected/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-kube-api-access-t2mm7\") pod \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\" (UID: \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\") " Nov 28 10:16:55 crc kubenswrapper[4838]: I1128 10:16:55.723578 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-dns-svc\") pod \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\" (UID: \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\") " Nov 28 10:16:55 crc kubenswrapper[4838]: I1128 10:16:55.723648 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-config\") pod \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\" (UID: \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\") " Nov 28 10:16:55 crc kubenswrapper[4838]: I1128 10:16:55.723750 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-ovsdbserver-sb\") pod \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\" (UID: \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\") " Nov 28 10:16:55 crc kubenswrapper[4838]: I1128 10:16:55.723786 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-ovsdbserver-nb\") pod \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\" (UID: \"2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2\") " Nov 28 10:16:55 crc kubenswrapper[4838]: I1128 10:16:55.731258 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-kube-api-access-t2mm7" (OuterVolumeSpecName: "kube-api-access-t2mm7") pod "2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2" (UID: "2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2"). InnerVolumeSpecName "kube-api-access-t2mm7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:16:55 crc kubenswrapper[4838]: I1128 10:16:55.810557 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2" (UID: "2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:16:55 crc kubenswrapper[4838]: I1128 10:16:55.812146 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2" (UID: "2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2"). 
InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:16:55 crc kubenswrapper[4838]: I1128 10:16:55.812244 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2" (UID: "2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:16:55 crc kubenswrapper[4838]: I1128 10:16:55.825641 4838 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:55 crc kubenswrapper[4838]: I1128 10:16:55.825671 4838 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:55 crc kubenswrapper[4838]: I1128 10:16:55.825684 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2mm7\" (UniqueName: \"kubernetes.io/projected/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-kube-api-access-t2mm7\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:55 crc kubenswrapper[4838]: I1128 10:16:55.825702 4838 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:55 crc kubenswrapper[4838]: I1128 10:16:55.835144 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-config" (OuterVolumeSpecName: "config") pod "2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2" (UID: "2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:16:55 crc kubenswrapper[4838]: I1128 10:16:55.890455 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-xhqld"] Nov 28 10:16:55 crc kubenswrapper[4838]: W1128 10:16:55.896686 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podacd4b461_b9cd_4f4b_89e9_8f4e46112938.slice/crio-c50d1334fc964f68523b699a973c5019437e1fa2868e9dcbc6703090ffce3042 WatchSource:0}: Error finding container c50d1334fc964f68523b699a973c5019437e1fa2868e9dcbc6703090ffce3042: Status 404 returned error can't find the container with id c50d1334fc964f68523b699a973c5019437e1fa2868e9dcbc6703090ffce3042 Nov 28 10:16:55 crc kubenswrapper[4838]: I1128 10:16:55.926938 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:16:56 crc kubenswrapper[4838]: I1128 10:16:56.612104 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-xhqld" event={"ID":"acd4b461-b9cd-4f4b-89e9-8f4e46112938","Type":"ContainerStarted","Data":"ea7a51e6574a25bb9640eadf80f21965c39cbc6410bec5c8c57593439ee47c45"} Nov 28 10:16:56 crc kubenswrapper[4838]: I1128 10:16:56.612466 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-xhqld" event={"ID":"acd4b461-b9cd-4f4b-89e9-8f4e46112938","Type":"ContainerStarted","Data":"c50d1334fc964f68523b699a973c5019437e1fa2868e9dcbc6703090ffce3042"} Nov 28 10:16:56 crc kubenswrapper[4838]: I1128 10:16:56.615103 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-8ttzd" event={"ID":"435894c3-ccee-4f57-8afa-d225888db755","Type":"ContainerStarted","Data":"1e791633c3e1b899c75ddc4230587da1b7e592a0064d665ce153f947754e9429"} Nov 28 10:16:56 crc kubenswrapper[4838]: I1128 10:16:56.623796 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6bd887d-04f5-45c0-b831-1d16262bbf08","Type":"ContainerStarted","Data":"3f39291203323511b7dacbb127e1410ad584d8fd37be3292550a3d7cc5ffbf02"} Nov 28 10:16:56 crc kubenswrapper[4838]: I1128 10:16:56.641104 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54f9b7b8d9-kxcgn" Nov 28 10:16:56 crc kubenswrapper[4838]: I1128 10:16:56.641358 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-jg6k6" event={"ID":"57482c21-bcdb-4a48-93ed-41ddca82a9fb","Type":"ContainerStarted","Data":"7d8a363cab24054d2ba2b9141f07207bcfd30a07ac3f6023ba2cd123e67272da"} Nov 28 10:16:56 crc kubenswrapper[4838]: I1128 10:16:56.650599 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-xhqld" podStartSLOduration=15.650578176 podStartE2EDuration="15.650578176s" podCreationTimestamp="2025-11-28 10:16:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:16:56.638952533 +0000 UTC m=+1188.337926753" watchObservedRunningTime="2025-11-28 10:16:56.650578176 +0000 UTC m=+1188.349552346" Nov 28 10:16:56 crc kubenswrapper[4838]: I1128 10:16:56.657293 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-8ttzd" podStartSLOduration=3.17277284 podStartE2EDuration="24.657273416s" podCreationTimestamp="2025-11-28 10:16:32 +0000 UTC" firstStartedPulling="2025-11-28 10:16:33.983663004 +0000 UTC m=+1165.682637174" lastFinishedPulling="2025-11-28 10:16:55.46816358 +0000 UTC m=+1187.167137750" observedRunningTime="2025-11-28 10:16:56.656445814 +0000 UTC m=+1188.355419994" watchObservedRunningTime="2025-11-28 10:16:56.657273416 +0000 UTC m=+1188.356247586" Nov 28 10:16:56 crc kubenswrapper[4838]: I1128 10:16:56.681958 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54f9b7b8d9-kxcgn"] Nov 28 10:16:56 crc kubenswrapper[4838]: I1128 10:16:56.695606 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-54f9b7b8d9-kxcgn"] Nov 28 10:16:56 crc kubenswrapper[4838]: I1128 10:16:56.703073 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-jg6k6" podStartSLOduration=3.385647919 podStartE2EDuration="24.703050069s" podCreationTimestamp="2025-11-28 10:16:32 +0000 UTC" firstStartedPulling="2025-11-28 10:16:34.138244402 +0000 UTC m=+1165.837218572" lastFinishedPulling="2025-11-28 10:16:55.455646532 +0000 UTC m=+1187.154620722" observedRunningTime="2025-11-28 10:16:56.685117417 +0000 UTC m=+1188.384091587" watchObservedRunningTime="2025-11-28 10:16:56.703050069 +0000 UTC m=+1188.402024239" Nov 28 10:16:57 crc kubenswrapper[4838]: I1128 10:16:57.652411 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6bd887d-04f5-45c0-b831-1d16262bbf08","Type":"ContainerStarted","Data":"37183545c8c3a969307f440966afa2391add66012dc772d301476a2e5b8bcde8"} Nov 28 10:16:58 crc kubenswrapper[4838]: I1128 10:16:58.576269 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2" path="/var/lib/kubelet/pods/2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2/volumes" Nov 28 10:16:58 crc kubenswrapper[4838]: I1128 10:16:58.664696 4838 generic.go:334] "Generic (PLEG): container finished" podID="57482c21-bcdb-4a48-93ed-41ddca82a9fb" containerID="7d8a363cab24054d2ba2b9141f07207bcfd30a07ac3f6023ba2cd123e67272da" exitCode=0 Nov 28 10:16:58 crc kubenswrapper[4838]: I1128 10:16:58.664762 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-jg6k6" 
event={"ID":"57482c21-bcdb-4a48-93ed-41ddca82a9fb","Type":"ContainerDied","Data":"7d8a363cab24054d2ba2b9141f07207bcfd30a07ac3f6023ba2cd123e67272da"} Nov 28 10:16:59 crc kubenswrapper[4838]: I1128 10:16:59.673433 4838 generic.go:334] "Generic (PLEG): container finished" podID="acd4b461-b9cd-4f4b-89e9-8f4e46112938" containerID="ea7a51e6574a25bb9640eadf80f21965c39cbc6410bec5c8c57593439ee47c45" exitCode=0 Nov 28 10:16:59 crc kubenswrapper[4838]: I1128 10:16:59.673647 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-xhqld" event={"ID":"acd4b461-b9cd-4f4b-89e9-8f4e46112938","Type":"ContainerDied","Data":"ea7a51e6574a25bb9640eadf80f21965c39cbc6410bec5c8c57593439ee47c45"} Nov 28 10:17:00 crc kubenswrapper[4838]: I1128 10:17:00.691886 4838 generic.go:334] "Generic (PLEG): container finished" podID="435894c3-ccee-4f57-8afa-d225888db755" containerID="1e791633c3e1b899c75ddc4230587da1b7e592a0064d665ce153f947754e9429" exitCode=0 Nov 28 10:17:00 crc kubenswrapper[4838]: I1128 10:17:00.692093 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-8ttzd" event={"ID":"435894c3-ccee-4f57-8afa-d225888db755","Type":"ContainerDied","Data":"1e791633c3e1b899c75ddc4230587da1b7e592a0064d665ce153f947754e9429"} Nov 28 10:17:02 crc kubenswrapper[4838]: I1128 10:17:02.713208 4838 generic.go:334] "Generic (PLEG): container finished" podID="1f0e700a-b642-4062-94fd-dec398ba7a22" containerID="98b9dd4ba1006b6494657179047d568cd92be833abeccc5dd03f5ecbd3ccdc02" exitCode=0 Nov 28 10:17:02 crc kubenswrapper[4838]: I1128 10:17:02.713880 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-s4dkv" event={"ID":"1f0e700a-b642-4062-94fd-dec398ba7a22","Type":"ContainerDied","Data":"98b9dd4ba1006b6494657179047d568cd92be833abeccc5dd03f5ecbd3ccdc02"} Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.350770 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-8ttzd" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.362650 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-jg6k6" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.363318 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-xhqld" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.503548 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/435894c3-ccee-4f57-8afa-d225888db755-db-sync-config-data\") pod \"435894c3-ccee-4f57-8afa-d225888db755\" (UID: \"435894c3-ccee-4f57-8afa-d225888db755\") " Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.503646 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57482c21-bcdb-4a48-93ed-41ddca82a9fb-combined-ca-bundle\") pod \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\" (UID: \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\") " Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.503703 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57482c21-bcdb-4a48-93ed-41ddca82a9fb-logs\") pod \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\" (UID: \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\") " Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.503749 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2x9nc\" (UniqueName: \"kubernetes.io/projected/57482c21-bcdb-4a48-93ed-41ddca82a9fb-kube-api-access-2x9nc\") pod \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\" (UID: \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\") " Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.503782 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-combined-ca-bundle\") pod \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.503806 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-scripts\") pod \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.503895 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57482c21-bcdb-4a48-93ed-41ddca82a9fb-scripts\") pod \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\" (UID: \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\") " Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.503931 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-config-data\") pod \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.504018 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-fernet-keys\") pod \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.504178 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57482c21-bcdb-4a48-93ed-41ddca82a9fb-config-data\") pod \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\" (UID: \"57482c21-bcdb-4a48-93ed-41ddca82a9fb\") 
" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.504250 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4mbxq\" (UniqueName: \"kubernetes.io/projected/435894c3-ccee-4f57-8afa-d225888db755-kube-api-access-4mbxq\") pod \"435894c3-ccee-4f57-8afa-d225888db755\" (UID: \"435894c3-ccee-4f57-8afa-d225888db755\") " Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.504286 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-credential-keys\") pod \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.504355 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/435894c3-ccee-4f57-8afa-d225888db755-combined-ca-bundle\") pod \"435894c3-ccee-4f57-8afa-d225888db755\" (UID: \"435894c3-ccee-4f57-8afa-d225888db755\") " Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.504410 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7z92m\" (UniqueName: \"kubernetes.io/projected/acd4b461-b9cd-4f4b-89e9-8f4e46112938-kube-api-access-7z92m\") pod \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\" (UID: \"acd4b461-b9cd-4f4b-89e9-8f4e46112938\") " Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.504433 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57482c21-bcdb-4a48-93ed-41ddca82a9fb-logs" (OuterVolumeSpecName: "logs") pod "57482c21-bcdb-4a48-93ed-41ddca82a9fb" (UID: "57482c21-bcdb-4a48-93ed-41ddca82a9fb"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.504938 4838 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57482c21-bcdb-4a48-93ed-41ddca82a9fb-logs\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.508338 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "acd4b461-b9cd-4f4b-89e9-8f4e46112938" (UID: "acd4b461-b9cd-4f4b-89e9-8f4e46112938"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.509052 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/acd4b461-b9cd-4f4b-89e9-8f4e46112938-kube-api-access-7z92m" (OuterVolumeSpecName: "kube-api-access-7z92m") pod "acd4b461-b9cd-4f4b-89e9-8f4e46112938" (UID: "acd4b461-b9cd-4f4b-89e9-8f4e46112938"). InnerVolumeSpecName "kube-api-access-7z92m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.509089 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-scripts" (OuterVolumeSpecName: "scripts") pod "acd4b461-b9cd-4f4b-89e9-8f4e46112938" (UID: "acd4b461-b9cd-4f4b-89e9-8f4e46112938"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.509587 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/435894c3-ccee-4f57-8afa-d225888db755-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "435894c3-ccee-4f57-8afa-d225888db755" (UID: "435894c3-ccee-4f57-8afa-d225888db755"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.510083 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "acd4b461-b9cd-4f4b-89e9-8f4e46112938" (UID: "acd4b461-b9cd-4f4b-89e9-8f4e46112938"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.512079 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57482c21-bcdb-4a48-93ed-41ddca82a9fb-scripts" (OuterVolumeSpecName: "scripts") pod "57482c21-bcdb-4a48-93ed-41ddca82a9fb" (UID: "57482c21-bcdb-4a48-93ed-41ddca82a9fb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.512339 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/435894c3-ccee-4f57-8afa-d225888db755-kube-api-access-4mbxq" (OuterVolumeSpecName: "kube-api-access-4mbxq") pod "435894c3-ccee-4f57-8afa-d225888db755" (UID: "435894c3-ccee-4f57-8afa-d225888db755"). InnerVolumeSpecName "kube-api-access-4mbxq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.522845 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57482c21-bcdb-4a48-93ed-41ddca82a9fb-kube-api-access-2x9nc" (OuterVolumeSpecName: "kube-api-access-2x9nc") pod "57482c21-bcdb-4a48-93ed-41ddca82a9fb" (UID: "57482c21-bcdb-4a48-93ed-41ddca82a9fb"). InnerVolumeSpecName "kube-api-access-2x9nc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.528976 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "acd4b461-b9cd-4f4b-89e9-8f4e46112938" (UID: "acd4b461-b9cd-4f4b-89e9-8f4e46112938"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.529666 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-config-data" (OuterVolumeSpecName: "config-data") pod "acd4b461-b9cd-4f4b-89e9-8f4e46112938" (UID: "acd4b461-b9cd-4f4b-89e9-8f4e46112938"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.530838 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/435894c3-ccee-4f57-8afa-d225888db755-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "435894c3-ccee-4f57-8afa-d225888db755" (UID: "435894c3-ccee-4f57-8afa-d225888db755"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.542548 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57482c21-bcdb-4a48-93ed-41ddca82a9fb-config-data" (OuterVolumeSpecName: "config-data") pod "57482c21-bcdb-4a48-93ed-41ddca82a9fb" (UID: "57482c21-bcdb-4a48-93ed-41ddca82a9fb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.551573 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57482c21-bcdb-4a48-93ed-41ddca82a9fb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "57482c21-bcdb-4a48-93ed-41ddca82a9fb" (UID: "57482c21-bcdb-4a48-93ed-41ddca82a9fb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.606330 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2x9nc\" (UniqueName: \"kubernetes.io/projected/57482c21-bcdb-4a48-93ed-41ddca82a9fb-kube-api-access-2x9nc\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.606361 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.606371 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.606380 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57482c21-bcdb-4a48-93ed-41ddca82a9fb-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.606388 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.606397 4838 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.606405 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57482c21-bcdb-4a48-93ed-41ddca82a9fb-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.606415 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4mbxq\" (UniqueName: \"kubernetes.io/projected/435894c3-ccee-4f57-8afa-d225888db755-kube-api-access-4mbxq\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.606423 4838 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/acd4b461-b9cd-4f4b-89e9-8f4e46112938-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.606433 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/435894c3-ccee-4f57-8afa-d225888db755-combined-ca-bundle\") on 
node \"crc\" DevicePath \"\"" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.606441 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7z92m\" (UniqueName: \"kubernetes.io/projected/acd4b461-b9cd-4f4b-89e9-8f4e46112938-kube-api-access-7z92m\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.606449 4838 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/435894c3-ccee-4f57-8afa-d225888db755-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.606457 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57482c21-bcdb-4a48-93ed-41ddca82a9fb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.727309 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-xhqld" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.727372 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-xhqld" event={"ID":"acd4b461-b9cd-4f4b-89e9-8f4e46112938","Type":"ContainerDied","Data":"c50d1334fc964f68523b699a973c5019437e1fa2868e9dcbc6703090ffce3042"} Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.727409 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c50d1334fc964f68523b699a973c5019437e1fa2868e9dcbc6703090ffce3042" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.729636 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-8ttzd" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.729658 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-8ttzd" event={"ID":"435894c3-ccee-4f57-8afa-d225888db755","Type":"ContainerDied","Data":"6f0d0bb1e7e26960895ad2653cf0b366d590e05db91ccc384b44dfb8d1c34e1a"} Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.730092 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6f0d0bb1e7e26960895ad2653cf0b366d590e05db91ccc384b44dfb8d1c34e1a" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.739887 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6bd887d-04f5-45c0-b831-1d16262bbf08","Type":"ContainerStarted","Data":"fb6e5792d865ee441936a93349d3a59c49f77d75278e9c2efaac73c96d8c90e2"} Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.741694 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-jg6k6" Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.742002 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-jg6k6" event={"ID":"57482c21-bcdb-4a48-93ed-41ddca82a9fb","Type":"ContainerDied","Data":"1d788379d848116f49a27f5aafdc8c8a4eadab232c3220b694aab1a5967beea6"} Nov 28 10:17:03 crc kubenswrapper[4838]: I1128 10:17:03.742024 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1d788379d848116f49a27f5aafdc8c8a4eadab232c3220b694aab1a5967beea6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.054171 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-s4dkv" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.115280 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q94wj\" (UniqueName: \"kubernetes.io/projected/1f0e700a-b642-4062-94fd-dec398ba7a22-kube-api-access-q94wj\") pod \"1f0e700a-b642-4062-94fd-dec398ba7a22\" (UID: \"1f0e700a-b642-4062-94fd-dec398ba7a22\") " Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.115425 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f0e700a-b642-4062-94fd-dec398ba7a22-combined-ca-bundle\") pod \"1f0e700a-b642-4062-94fd-dec398ba7a22\" (UID: \"1f0e700a-b642-4062-94fd-dec398ba7a22\") " Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.115706 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1f0e700a-b642-4062-94fd-dec398ba7a22-config\") pod \"1f0e700a-b642-4062-94fd-dec398ba7a22\" (UID: \"1f0e700a-b642-4062-94fd-dec398ba7a22\") " Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.120558 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f0e700a-b642-4062-94fd-dec398ba7a22-kube-api-access-q94wj" (OuterVolumeSpecName: "kube-api-access-q94wj") pod "1f0e700a-b642-4062-94fd-dec398ba7a22" (UID: "1f0e700a-b642-4062-94fd-dec398ba7a22"). InnerVolumeSpecName "kube-api-access-q94wj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.143057 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f0e700a-b642-4062-94fd-dec398ba7a22-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1f0e700a-b642-4062-94fd-dec398ba7a22" (UID: "1f0e700a-b642-4062-94fd-dec398ba7a22"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.148154 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f0e700a-b642-4062-94fd-dec398ba7a22-config" (OuterVolumeSpecName: "config") pod "1f0e700a-b642-4062-94fd-dec398ba7a22" (UID: "1f0e700a-b642-4062-94fd-dec398ba7a22"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.217708 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/1f0e700a-b642-4062-94fd-dec398ba7a22-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.217768 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q94wj\" (UniqueName: \"kubernetes.io/projected/1f0e700a-b642-4062-94fd-dec398ba7a22-kube-api-access-q94wj\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.217808 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f0e700a-b642-4062-94fd-dec398ba7a22-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.591525 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-7f5f5f8b64-f2wff"] Nov 28 10:17:04 crc kubenswrapper[4838]: E1128 10:17:04.591974 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2" containerName="dnsmasq-dns" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.591996 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2" containerName="dnsmasq-dns" Nov 28 10:17:04 crc kubenswrapper[4838]: E1128 10:17:04.592014 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2" containerName="init" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.592023 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2" containerName="init" Nov 28 10:17:04 crc kubenswrapper[4838]: E1128 10:17:04.592036 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57482c21-bcdb-4a48-93ed-41ddca82a9fb" containerName="placement-db-sync" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.592045 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="57482c21-bcdb-4a48-93ed-41ddca82a9fb" containerName="placement-db-sync" Nov 28 10:17:04 crc kubenswrapper[4838]: E1128 10:17:04.592062 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f0e700a-b642-4062-94fd-dec398ba7a22" containerName="neutron-db-sync" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.592086 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f0e700a-b642-4062-94fd-dec398ba7a22" containerName="neutron-db-sync" Nov 28 10:17:04 crc kubenswrapper[4838]: E1128 10:17:04.592105 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="435894c3-ccee-4f57-8afa-d225888db755" containerName="barbican-db-sync" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.592113 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="435894c3-ccee-4f57-8afa-d225888db755" containerName="barbican-db-sync" Nov 28 10:17:04 crc kubenswrapper[4838]: E1128 10:17:04.592133 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acd4b461-b9cd-4f4b-89e9-8f4e46112938" containerName="keystone-bootstrap" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.592141 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="acd4b461-b9cd-4f4b-89e9-8f4e46112938" containerName="keystone-bootstrap" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.592391 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e5b70fe-78a6-4c9a-b0fb-77f1d0e7c2d2" 
containerName="dnsmasq-dns" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.592407 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="57482c21-bcdb-4a48-93ed-41ddca82a9fb" containerName="placement-db-sync" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.592425 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="435894c3-ccee-4f57-8afa-d225888db755" containerName="barbican-db-sync" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.592439 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f0e700a-b642-4062-94fd-dec398ba7a22" containerName="neutron-db-sync" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.592449 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="acd4b461-b9cd-4f4b-89e9-8f4e46112938" containerName="keystone-bootstrap" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.593499 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.597483 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.597696 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-wp5vt" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.598008 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.600163 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.600190 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.646030 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-698bf66db7-q4nv6"] Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.647400 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.649274 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.652940 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7f5f5f8b64-f2wff"] Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.653238 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-2bjp9" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.656538 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.656761 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.657267 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.667247 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-698bf66db7-q4nv6"] Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.675736 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.729944 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-public-tls-certs\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.729995 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4ad309b-1078-40e9-abd7-d1b476971fce-config-data\") pod \"placement-7f5f5f8b64-f2wff\" (UID: \"f4ad309b-1078-40e9-abd7-d1b476971fce\") " pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.730023 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-internal-tls-certs\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.730047 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-fernet-keys\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.730068 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f4ad309b-1078-40e9-abd7-d1b476971fce-logs\") pod \"placement-7f5f5f8b64-f2wff\" (UID: \"f4ad309b-1078-40e9-abd7-d1b476971fce\") " pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.730108 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/f4ad309b-1078-40e9-abd7-d1b476971fce-public-tls-certs\") pod \"placement-7f5f5f8b64-f2wff\" (UID: \"f4ad309b-1078-40e9-abd7-d1b476971fce\") " pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.730140 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-config-data\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.730161 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4ad309b-1078-40e9-abd7-d1b476971fce-scripts\") pod \"placement-7f5f5f8b64-f2wff\" (UID: \"f4ad309b-1078-40e9-abd7-d1b476971fce\") " pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.730206 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpvkw\" (UniqueName: \"kubernetes.io/projected/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-kube-api-access-kpvkw\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.730249 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4ad309b-1078-40e9-abd7-d1b476971fce-combined-ca-bundle\") pod \"placement-7f5f5f8b64-f2wff\" (UID: \"f4ad309b-1078-40e9-abd7-d1b476971fce\") " pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.730277 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-scripts\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.730309 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-combined-ca-bundle\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.730337 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4ad309b-1078-40e9-abd7-d1b476971fce-internal-tls-certs\") pod \"placement-7f5f5f8b64-f2wff\" (UID: \"f4ad309b-1078-40e9-abd7-d1b476971fce\") " pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.730550 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-credential-keys\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.730621 4838 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bltf5\" (UniqueName: \"kubernetes.io/projected/f4ad309b-1078-40e9-abd7-d1b476971fce-kube-api-access-bltf5\") pod \"placement-7f5f5f8b64-f2wff\" (UID: \"f4ad309b-1078-40e9-abd7-d1b476971fce\") " pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.764512 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-s4dkv" event={"ID":"1f0e700a-b642-4062-94fd-dec398ba7a22","Type":"ContainerDied","Data":"83cc1404a9b000db548432d104618a6e8b30bb3a937ad688bf5b66866e2e2f2c"} Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.764548 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="83cc1404a9b000db548432d104618a6e8b30bb3a937ad688bf5b66866e2e2f2c" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.764619 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-s4dkv" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.784706 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-6669b75dd9-q6nlg"] Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.786026 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6669b75dd9-q6nlg" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.789009 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-wp2h4" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.789156 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.789595 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.824537 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-65687b7854-r7rh5"] Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.826216 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-65687b7854-r7rh5" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.829416 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.831742 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-public-tls-certs\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.831782 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4ad309b-1078-40e9-abd7-d1b476971fce-config-data\") pod \"placement-7f5f5f8b64-f2wff\" (UID: \"f4ad309b-1078-40e9-abd7-d1b476971fce\") " pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.831804 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-fernet-keys\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.831818 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-internal-tls-certs\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.831843 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f4ad309b-1078-40e9-abd7-d1b476971fce-logs\") pod \"placement-7f5f5f8b64-f2wff\" (UID: \"f4ad309b-1078-40e9-abd7-d1b476971fce\") " pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.831871 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4ad309b-1078-40e9-abd7-d1b476971fce-public-tls-certs\") pod \"placement-7f5f5f8b64-f2wff\" (UID: \"f4ad309b-1078-40e9-abd7-d1b476971fce\") " pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.831886 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-config-data\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.831901 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4ad309b-1078-40e9-abd7-d1b476971fce-scripts\") pod \"placement-7f5f5f8b64-f2wff\" (UID: \"f4ad309b-1078-40e9-abd7-d1b476971fce\") " pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.831936 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpvkw\" (UniqueName: \"kubernetes.io/projected/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-kube-api-access-kpvkw\") 
pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.831969 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4ad309b-1078-40e9-abd7-d1b476971fce-combined-ca-bundle\") pod \"placement-7f5f5f8b64-f2wff\" (UID: \"f4ad309b-1078-40e9-abd7-d1b476971fce\") " pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.831989 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-scripts\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.832009 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-combined-ca-bundle\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.832035 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4ad309b-1078-40e9-abd7-d1b476971fce-internal-tls-certs\") pod \"placement-7f5f5f8b64-f2wff\" (UID: \"f4ad309b-1078-40e9-abd7-d1b476971fce\") " pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.832072 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-credential-keys\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.832094 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bltf5\" (UniqueName: \"kubernetes.io/projected/f4ad309b-1078-40e9-abd7-d1b476971fce-kube-api-access-bltf5\") pod \"placement-7f5f5f8b64-f2wff\" (UID: \"f4ad309b-1078-40e9-abd7-d1b476971fce\") " pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.834960 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f4ad309b-1078-40e9-abd7-d1b476971fce-logs\") pod \"placement-7f5f5f8b64-f2wff\" (UID: \"f4ad309b-1078-40e9-abd7-d1b476971fce\") " pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.843917 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-public-tls-certs\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.845448 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-fernet-keys\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 
10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.846096 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-config-data\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.849499 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4ad309b-1078-40e9-abd7-d1b476971fce-config-data\") pod \"placement-7f5f5f8b64-f2wff\" (UID: \"f4ad309b-1078-40e9-abd7-d1b476971fce\") " pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.849876 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-internal-tls-certs\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.850544 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4ad309b-1078-40e9-abd7-d1b476971fce-scripts\") pod \"placement-7f5f5f8b64-f2wff\" (UID: \"f4ad309b-1078-40e9-abd7-d1b476971fce\") " pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.851455 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4ad309b-1078-40e9-abd7-d1b476971fce-public-tls-certs\") pod \"placement-7f5f5f8b64-f2wff\" (UID: \"f4ad309b-1078-40e9-abd7-d1b476971fce\") " pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.856652 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-credential-keys\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.856810 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-scripts\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.858860 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpvkw\" (UniqueName: \"kubernetes.io/projected/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-kube-api-access-kpvkw\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.859396 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4ad309b-1078-40e9-abd7-d1b476971fce-internal-tls-certs\") pod \"placement-7f5f5f8b64-f2wff\" (UID: \"f4ad309b-1078-40e9-abd7-d1b476971fce\") " pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.861841 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6669b75dd9-q6nlg"] Nov 28 10:17:04 crc 
kubenswrapper[4838]: I1128 10:17:04.862857 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bltf5\" (UniqueName: \"kubernetes.io/projected/f4ad309b-1078-40e9-abd7-d1b476971fce-kube-api-access-bltf5\") pod \"placement-7f5f5f8b64-f2wff\" (UID: \"f4ad309b-1078-40e9-abd7-d1b476971fce\") " pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.870938 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4ad309b-1078-40e9-abd7-d1b476971fce-combined-ca-bundle\") pod \"placement-7f5f5f8b64-f2wff\" (UID: \"f4ad309b-1078-40e9-abd7-d1b476971fce\") " pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.885267 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-65687b7854-r7rh5"] Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.886364 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99-combined-ca-bundle\") pod \"keystone-698bf66db7-q4nv6\" (UID: \"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99\") " pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.933738 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de6a5f4a-30c6-4f42-88e7-3f113c1ed53b-combined-ca-bundle\") pod \"barbican-keystone-listener-65687b7854-r7rh5\" (UID: \"de6a5f4a-30c6-4f42-88e7-3f113c1ed53b\") " pod="openstack/barbican-keystone-listener-65687b7854-r7rh5" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.933796 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de6a5f4a-30c6-4f42-88e7-3f113c1ed53b-logs\") pod \"barbican-keystone-listener-65687b7854-r7rh5\" (UID: \"de6a5f4a-30c6-4f42-88e7-3f113c1ed53b\") " pod="openstack/barbican-keystone-listener-65687b7854-r7rh5" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.933846 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/39e38efd-bd92-419d-90e8-f6630032e7d7-config-data-custom\") pod \"barbican-worker-6669b75dd9-q6nlg\" (UID: \"39e38efd-bd92-419d-90e8-f6630032e7d7\") " pod="openstack/barbican-worker-6669b75dd9-q6nlg" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.933881 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/de6a5f4a-30c6-4f42-88e7-3f113c1ed53b-config-data-custom\") pod \"barbican-keystone-listener-65687b7854-r7rh5\" (UID: \"de6a5f4a-30c6-4f42-88e7-3f113c1ed53b\") " pod="openstack/barbican-keystone-listener-65687b7854-r7rh5" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.933944 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39e38efd-bd92-419d-90e8-f6630032e7d7-combined-ca-bundle\") pod \"barbican-worker-6669b75dd9-q6nlg\" (UID: \"39e38efd-bd92-419d-90e8-f6630032e7d7\") " pod="openstack/barbican-worker-6669b75dd9-q6nlg" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.933989 4838 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39e38efd-bd92-419d-90e8-f6630032e7d7-config-data\") pod \"barbican-worker-6669b75dd9-q6nlg\" (UID: \"39e38efd-bd92-419d-90e8-f6630032e7d7\") " pod="openstack/barbican-worker-6669b75dd9-q6nlg" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.934016 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de6a5f4a-30c6-4f42-88e7-3f113c1ed53b-config-data\") pod \"barbican-keystone-listener-65687b7854-r7rh5\" (UID: \"de6a5f4a-30c6-4f42-88e7-3f113c1ed53b\") " pod="openstack/barbican-keystone-listener-65687b7854-r7rh5" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.934071 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8cjf8\" (UniqueName: \"kubernetes.io/projected/de6a5f4a-30c6-4f42-88e7-3f113c1ed53b-kube-api-access-8cjf8\") pod \"barbican-keystone-listener-65687b7854-r7rh5\" (UID: \"de6a5f4a-30c6-4f42-88e7-3f113c1ed53b\") " pod="openstack/barbican-keystone-listener-65687b7854-r7rh5" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.934102 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntlff\" (UniqueName: \"kubernetes.io/projected/39e38efd-bd92-419d-90e8-f6630032e7d7-kube-api-access-ntlff\") pod \"barbican-worker-6669b75dd9-q6nlg\" (UID: \"39e38efd-bd92-419d-90e8-f6630032e7d7\") " pod="openstack/barbican-worker-6669b75dd9-q6nlg" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.934138 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39e38efd-bd92-419d-90e8-f6630032e7d7-logs\") pod \"barbican-worker-6669b75dd9-q6nlg\" (UID: \"39e38efd-bd92-419d-90e8-f6630032e7d7\") " pod="openstack/barbican-worker-6669b75dd9-q6nlg" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.947544 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-699df9757c-dvp4f"] Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.949058 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-699df9757c-dvp4f" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.961636 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.976429 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:04 crc kubenswrapper[4838]: I1128 10:17:04.983375 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-699df9757c-dvp4f"] Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.012337 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-9858c78b6-nx8kz"] Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.013805 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-9858c78b6-nx8kz" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.018554 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.035380 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntlff\" (UniqueName: \"kubernetes.io/projected/39e38efd-bd92-419d-90e8-f6630032e7d7-kube-api-access-ntlff\") pod \"barbican-worker-6669b75dd9-q6nlg\" (UID: \"39e38efd-bd92-419d-90e8-f6630032e7d7\") " pod="openstack/barbican-worker-6669b75dd9-q6nlg" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.035441 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39e38efd-bd92-419d-90e8-f6630032e7d7-logs\") pod \"barbican-worker-6669b75dd9-q6nlg\" (UID: \"39e38efd-bd92-419d-90e8-f6630032e7d7\") " pod="openstack/barbican-worker-6669b75dd9-q6nlg" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.035469 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-config\") pod \"dnsmasq-dns-699df9757c-dvp4f\" (UID: \"72b3211c-56cf-4987-9a43-2021dea17799\") " pod="openstack/dnsmasq-dns-699df9757c-dvp4f" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.035490 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlknw\" (UniqueName: \"kubernetes.io/projected/72b3211c-56cf-4987-9a43-2021dea17799-kube-api-access-mlknw\") pod \"dnsmasq-dns-699df9757c-dvp4f\" (UID: \"72b3211c-56cf-4987-9a43-2021dea17799\") " pod="openstack/dnsmasq-dns-699df9757c-dvp4f" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.035527 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de6a5f4a-30c6-4f42-88e7-3f113c1ed53b-combined-ca-bundle\") pod \"barbican-keystone-listener-65687b7854-r7rh5\" (UID: \"de6a5f4a-30c6-4f42-88e7-3f113c1ed53b\") " pod="openstack/barbican-keystone-listener-65687b7854-r7rh5" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.035544 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de6a5f4a-30c6-4f42-88e7-3f113c1ed53b-logs\") pod \"barbican-keystone-listener-65687b7854-r7rh5\" (UID: \"de6a5f4a-30c6-4f42-88e7-3f113c1ed53b\") " pod="openstack/barbican-keystone-listener-65687b7854-r7rh5" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.035575 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/39e38efd-bd92-419d-90e8-f6630032e7d7-config-data-custom\") pod \"barbican-worker-6669b75dd9-q6nlg\" (UID: \"39e38efd-bd92-419d-90e8-f6630032e7d7\") " pod="openstack/barbican-worker-6669b75dd9-q6nlg" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.035598 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-ovsdbserver-sb\") pod \"dnsmasq-dns-699df9757c-dvp4f\" (UID: \"72b3211c-56cf-4987-9a43-2021dea17799\") " pod="openstack/dnsmasq-dns-699df9757c-dvp4f" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.035616 4838 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/de6a5f4a-30c6-4f42-88e7-3f113c1ed53b-config-data-custom\") pod \"barbican-keystone-listener-65687b7854-r7rh5\" (UID: \"de6a5f4a-30c6-4f42-88e7-3f113c1ed53b\") " pod="openstack/barbican-keystone-listener-65687b7854-r7rh5" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.035648 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-dns-svc\") pod \"dnsmasq-dns-699df9757c-dvp4f\" (UID: \"72b3211c-56cf-4987-9a43-2021dea17799\") " pod="openstack/dnsmasq-dns-699df9757c-dvp4f" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.035671 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-ovsdbserver-nb\") pod \"dnsmasq-dns-699df9757c-dvp4f\" (UID: \"72b3211c-56cf-4987-9a43-2021dea17799\") " pod="openstack/dnsmasq-dns-699df9757c-dvp4f" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.035694 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39e38efd-bd92-419d-90e8-f6630032e7d7-combined-ca-bundle\") pod \"barbican-worker-6669b75dd9-q6nlg\" (UID: \"39e38efd-bd92-419d-90e8-f6630032e7d7\") " pod="openstack/barbican-worker-6669b75dd9-q6nlg" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.035733 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39e38efd-bd92-419d-90e8-f6630032e7d7-config-data\") pod \"barbican-worker-6669b75dd9-q6nlg\" (UID: \"39e38efd-bd92-419d-90e8-f6630032e7d7\") " pod="openstack/barbican-worker-6669b75dd9-q6nlg" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.035753 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de6a5f4a-30c6-4f42-88e7-3f113c1ed53b-config-data\") pod \"barbican-keystone-listener-65687b7854-r7rh5\" (UID: \"de6a5f4a-30c6-4f42-88e7-3f113c1ed53b\") " pod="openstack/barbican-keystone-listener-65687b7854-r7rh5" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.035773 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8cjf8\" (UniqueName: \"kubernetes.io/projected/de6a5f4a-30c6-4f42-88e7-3f113c1ed53b-kube-api-access-8cjf8\") pod \"barbican-keystone-listener-65687b7854-r7rh5\" (UID: \"de6a5f4a-30c6-4f42-88e7-3f113c1ed53b\") " pod="openstack/barbican-keystone-listener-65687b7854-r7rh5" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.040090 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39e38efd-bd92-419d-90e8-f6630032e7d7-logs\") pod \"barbican-worker-6669b75dd9-q6nlg\" (UID: \"39e38efd-bd92-419d-90e8-f6630032e7d7\") " pod="openstack/barbican-worker-6669b75dd9-q6nlg" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.045000 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de6a5f4a-30c6-4f42-88e7-3f113c1ed53b-logs\") pod \"barbican-keystone-listener-65687b7854-r7rh5\" (UID: \"de6a5f4a-30c6-4f42-88e7-3f113c1ed53b\") " pod="openstack/barbican-keystone-listener-65687b7854-r7rh5" Nov 
28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.047610 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39e38efd-bd92-419d-90e8-f6630032e7d7-config-data\") pod \"barbican-worker-6669b75dd9-q6nlg\" (UID: \"39e38efd-bd92-419d-90e8-f6630032e7d7\") " pod="openstack/barbican-worker-6669b75dd9-q6nlg" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.051178 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/de6a5f4a-30c6-4f42-88e7-3f113c1ed53b-config-data-custom\") pod \"barbican-keystone-listener-65687b7854-r7rh5\" (UID: \"de6a5f4a-30c6-4f42-88e7-3f113c1ed53b\") " pod="openstack/barbican-keystone-listener-65687b7854-r7rh5" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.052556 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de6a5f4a-30c6-4f42-88e7-3f113c1ed53b-config-data\") pod \"barbican-keystone-listener-65687b7854-r7rh5\" (UID: \"de6a5f4a-30c6-4f42-88e7-3f113c1ed53b\") " pod="openstack/barbican-keystone-listener-65687b7854-r7rh5" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.061118 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/39e38efd-bd92-419d-90e8-f6630032e7d7-config-data-custom\") pod \"barbican-worker-6669b75dd9-q6nlg\" (UID: \"39e38efd-bd92-419d-90e8-f6630032e7d7\") " pod="openstack/barbican-worker-6669b75dd9-q6nlg" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.062192 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-9858c78b6-nx8kz"] Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.064368 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39e38efd-bd92-419d-90e8-f6630032e7d7-combined-ca-bundle\") pod \"barbican-worker-6669b75dd9-q6nlg\" (UID: \"39e38efd-bd92-419d-90e8-f6630032e7d7\") " pod="openstack/barbican-worker-6669b75dd9-q6nlg" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.065377 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de6a5f4a-30c6-4f42-88e7-3f113c1ed53b-combined-ca-bundle\") pod \"barbican-keystone-listener-65687b7854-r7rh5\" (UID: \"de6a5f4a-30c6-4f42-88e7-3f113c1ed53b\") " pod="openstack/barbican-keystone-listener-65687b7854-r7rh5" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.082640 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8cjf8\" (UniqueName: \"kubernetes.io/projected/de6a5f4a-30c6-4f42-88e7-3f113c1ed53b-kube-api-access-8cjf8\") pod \"barbican-keystone-listener-65687b7854-r7rh5\" (UID: \"de6a5f4a-30c6-4f42-88e7-3f113c1ed53b\") " pod="openstack/barbican-keystone-listener-65687b7854-r7rh5" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.089384 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntlff\" (UniqueName: \"kubernetes.io/projected/39e38efd-bd92-419d-90e8-f6630032e7d7-kube-api-access-ntlff\") pod \"barbican-worker-6669b75dd9-q6nlg\" (UID: \"39e38efd-bd92-419d-90e8-f6630032e7d7\") " pod="openstack/barbican-worker-6669b75dd9-q6nlg" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.132227 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-6669b75dd9-q6nlg" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.138318 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-ovsdbserver-sb\") pod \"dnsmasq-dns-699df9757c-dvp4f\" (UID: \"72b3211c-56cf-4987-9a43-2021dea17799\") " pod="openstack/dnsmasq-dns-699df9757c-dvp4f" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.138376 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-dns-svc\") pod \"dnsmasq-dns-699df9757c-dvp4f\" (UID: \"72b3211c-56cf-4987-9a43-2021dea17799\") " pod="openstack/dnsmasq-dns-699df9757c-dvp4f" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.138402 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z67wp\" (UniqueName: \"kubernetes.io/projected/57292635-e413-4511-830d-e536d3c8e398-kube-api-access-z67wp\") pod \"barbican-api-9858c78b6-nx8kz\" (UID: \"57292635-e413-4511-830d-e536d3c8e398\") " pod="openstack/barbican-api-9858c78b6-nx8kz" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.138422 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-ovsdbserver-nb\") pod \"dnsmasq-dns-699df9757c-dvp4f\" (UID: \"72b3211c-56cf-4987-9a43-2021dea17799\") " pod="openstack/dnsmasq-dns-699df9757c-dvp4f" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.138438 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57292635-e413-4511-830d-e536d3c8e398-logs\") pod \"barbican-api-9858c78b6-nx8kz\" (UID: \"57292635-e413-4511-830d-e536d3c8e398\") " pod="openstack/barbican-api-9858c78b6-nx8kz" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.138470 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57292635-e413-4511-830d-e536d3c8e398-combined-ca-bundle\") pod \"barbican-api-9858c78b6-nx8kz\" (UID: \"57292635-e413-4511-830d-e536d3c8e398\") " pod="openstack/barbican-api-9858c78b6-nx8kz" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.138526 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-config\") pod \"dnsmasq-dns-699df9757c-dvp4f\" (UID: \"72b3211c-56cf-4987-9a43-2021dea17799\") " pod="openstack/dnsmasq-dns-699df9757c-dvp4f" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.138545 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlknw\" (UniqueName: \"kubernetes.io/projected/72b3211c-56cf-4987-9a43-2021dea17799-kube-api-access-mlknw\") pod \"dnsmasq-dns-699df9757c-dvp4f\" (UID: \"72b3211c-56cf-4987-9a43-2021dea17799\") " pod="openstack/dnsmasq-dns-699df9757c-dvp4f" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.138560 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57292635-e413-4511-830d-e536d3c8e398-config-data\") pod \"barbican-api-9858c78b6-nx8kz\" (UID: 
\"57292635-e413-4511-830d-e536d3c8e398\") " pod="openstack/barbican-api-9858c78b6-nx8kz" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.138609 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/57292635-e413-4511-830d-e536d3c8e398-config-data-custom\") pod \"barbican-api-9858c78b6-nx8kz\" (UID: \"57292635-e413-4511-830d-e536d3c8e398\") " pod="openstack/barbican-api-9858c78b6-nx8kz" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.139400 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-ovsdbserver-sb\") pod \"dnsmasq-dns-699df9757c-dvp4f\" (UID: \"72b3211c-56cf-4987-9a43-2021dea17799\") " pod="openstack/dnsmasq-dns-699df9757c-dvp4f" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.140575 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-ovsdbserver-nb\") pod \"dnsmasq-dns-699df9757c-dvp4f\" (UID: \"72b3211c-56cf-4987-9a43-2021dea17799\") " pod="openstack/dnsmasq-dns-699df9757c-dvp4f" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.141134 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-config\") pod \"dnsmasq-dns-699df9757c-dvp4f\" (UID: \"72b3211c-56cf-4987-9a43-2021dea17799\") " pod="openstack/dnsmasq-dns-699df9757c-dvp4f" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.143782 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-dns-svc\") pod \"dnsmasq-dns-699df9757c-dvp4f\" (UID: \"72b3211c-56cf-4987-9a43-2021dea17799\") " pod="openstack/dnsmasq-dns-699df9757c-dvp4f" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.146958 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-699df9757c-dvp4f"] Nov 28 10:17:05 crc kubenswrapper[4838]: E1128 10:17:05.147517 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-mlknw], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-699df9757c-dvp4f" podUID="72b3211c-56cf-4987-9a43-2021dea17799" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.177470 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bb684768f-7wcp7"] Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.181371 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.185907 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlknw\" (UniqueName: \"kubernetes.io/projected/72b3211c-56cf-4987-9a43-2021dea17799-kube-api-access-mlknw\") pod \"dnsmasq-dns-699df9757c-dvp4f\" (UID: \"72b3211c-56cf-4987-9a43-2021dea17799\") " pod="openstack/dnsmasq-dns-699df9757c-dvp4f" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.203998 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7ff57884d4-gkgfr"] Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.205625 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7ff57884d4-gkgfr" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.213078 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.213521 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-2w4fz" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.213701 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.213982 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.232430 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb684768f-7wcp7"] Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.244422 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8j7vg\" (UniqueName: \"kubernetes.io/projected/3eeec0e8-8302-414a-882e-22921c0e0872-kube-api-access-8j7vg\") pod \"dnsmasq-dns-6bb684768f-7wcp7\" (UID: \"3eeec0e8-8302-414a-882e-22921c0e0872\") " pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.244467 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-dns-svc\") pod \"dnsmasq-dns-6bb684768f-7wcp7\" (UID: \"3eeec0e8-8302-414a-882e-22921c0e0872\") " pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.244505 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb684768f-7wcp7\" (UID: \"3eeec0e8-8302-414a-882e-22921c0e0872\") " pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.244536 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-config\") pod \"dnsmasq-dns-6bb684768f-7wcp7\" (UID: \"3eeec0e8-8302-414a-882e-22921c0e0872\") " pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.244561 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57292635-e413-4511-830d-e536d3c8e398-config-data\") pod \"barbican-api-9858c78b6-nx8kz\" (UID: \"57292635-e413-4511-830d-e536d3c8e398\") " pod="openstack/barbican-api-9858c78b6-nx8kz" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.244600 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb684768f-7wcp7\" (UID: \"3eeec0e8-8302-414a-882e-22921c0e0872\") " pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.244640 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/57292635-e413-4511-830d-e536d3c8e398-config-data-custom\") pod \"barbican-api-9858c78b6-nx8kz\" (UID: \"57292635-e413-4511-830d-e536d3c8e398\") " pod="openstack/barbican-api-9858c78b6-nx8kz" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.244689 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z67wp\" (UniqueName: \"kubernetes.io/projected/57292635-e413-4511-830d-e536d3c8e398-kube-api-access-z67wp\") pod \"barbican-api-9858c78b6-nx8kz\" (UID: \"57292635-e413-4511-830d-e536d3c8e398\") " pod="openstack/barbican-api-9858c78b6-nx8kz" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.244726 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57292635-e413-4511-830d-e536d3c8e398-logs\") pod \"barbican-api-9858c78b6-nx8kz\" (UID: \"57292635-e413-4511-830d-e536d3c8e398\") " pod="openstack/barbican-api-9858c78b6-nx8kz" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.244759 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57292635-e413-4511-830d-e536d3c8e398-combined-ca-bundle\") pod \"barbican-api-9858c78b6-nx8kz\" (UID: \"57292635-e413-4511-830d-e536d3c8e398\") " pod="openstack/barbican-api-9858c78b6-nx8kz" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.249238 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57292635-e413-4511-830d-e536d3c8e398-combined-ca-bundle\") pod \"barbican-api-9858c78b6-nx8kz\" (UID: \"57292635-e413-4511-830d-e536d3c8e398\") " pod="openstack/barbican-api-9858c78b6-nx8kz" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.249888 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57292635-e413-4511-830d-e536d3c8e398-logs\") pod \"barbican-api-9858c78b6-nx8kz\" (UID: \"57292635-e413-4511-830d-e536d3c8e398\") " pod="openstack/barbican-api-9858c78b6-nx8kz" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.263481 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57292635-e413-4511-830d-e536d3c8e398-config-data\") pod \"barbican-api-9858c78b6-nx8kz\" (UID: \"57292635-e413-4511-830d-e536d3c8e398\") " pod="openstack/barbican-api-9858c78b6-nx8kz" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.272798 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-65687b7854-r7rh5" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.291474 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z67wp\" (UniqueName: \"kubernetes.io/projected/57292635-e413-4511-830d-e536d3c8e398-kube-api-access-z67wp\") pod \"barbican-api-9858c78b6-nx8kz\" (UID: \"57292635-e413-4511-830d-e536d3c8e398\") " pod="openstack/barbican-api-9858c78b6-nx8kz" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.292972 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/57292635-e413-4511-830d-e536d3c8e398-config-data-custom\") pod \"barbican-api-9858c78b6-nx8kz\" (UID: \"57292635-e413-4511-830d-e536d3c8e398\") " pod="openstack/barbican-api-9858c78b6-nx8kz" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.324455 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7ff57884d4-gkgfr"] Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.347830 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-combined-ca-bundle\") pod \"neutron-7ff57884d4-gkgfr\" (UID: \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\") " pod="openstack/neutron-7ff57884d4-gkgfr" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.347901 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-httpd-config\") pod \"neutron-7ff57884d4-gkgfr\" (UID: \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\") " pod="openstack/neutron-7ff57884d4-gkgfr" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.347997 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-config\") pod \"neutron-7ff57884d4-gkgfr\" (UID: \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\") " pod="openstack/neutron-7ff57884d4-gkgfr" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.348113 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8j7vg\" (UniqueName: \"kubernetes.io/projected/3eeec0e8-8302-414a-882e-22921c0e0872-kube-api-access-8j7vg\") pod \"dnsmasq-dns-6bb684768f-7wcp7\" (UID: \"3eeec0e8-8302-414a-882e-22921c0e0872\") " pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.348141 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-dns-svc\") pod \"dnsmasq-dns-6bb684768f-7wcp7\" (UID: \"3eeec0e8-8302-414a-882e-22921c0e0872\") " pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.348203 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb684768f-7wcp7\" (UID: \"3eeec0e8-8302-414a-882e-22921c0e0872\") " pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.350703 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-config\") pod \"dnsmasq-dns-6bb684768f-7wcp7\" (UID: \"3eeec0e8-8302-414a-882e-22921c0e0872\") " pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.350877 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-ovndb-tls-certs\") pod \"neutron-7ff57884d4-gkgfr\" (UID: \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\") " pod="openstack/neutron-7ff57884d4-gkgfr" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.350929 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb684768f-7wcp7\" (UID: \"3eeec0e8-8302-414a-882e-22921c0e0872\") " pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.350955 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvmkz\" (UniqueName: \"kubernetes.io/projected/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-kube-api-access-xvmkz\") pod \"neutron-7ff57884d4-gkgfr\" (UID: \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\") " pod="openstack/neutron-7ff57884d4-gkgfr" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.352690 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-config\") pod \"dnsmasq-dns-6bb684768f-7wcp7\" (UID: \"3eeec0e8-8302-414a-882e-22921c0e0872\") " pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.353296 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-dns-svc\") pod \"dnsmasq-dns-6bb684768f-7wcp7\" (UID: \"3eeec0e8-8302-414a-882e-22921c0e0872\") " pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.356465 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb684768f-7wcp7\" (UID: \"3eeec0e8-8302-414a-882e-22921c0e0872\") " pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.356707 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb684768f-7wcp7\" (UID: \"3eeec0e8-8302-414a-882e-22921c0e0872\") " pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.389447 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8j7vg\" (UniqueName: \"kubernetes.io/projected/3eeec0e8-8302-414a-882e-22921c0e0872-kube-api-access-8j7vg\") pod \"dnsmasq-dns-6bb684768f-7wcp7\" (UID: \"3eeec0e8-8302-414a-882e-22921c0e0872\") " pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.405191 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-9858c78b6-nx8kz" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.453003 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-combined-ca-bundle\") pod \"neutron-7ff57884d4-gkgfr\" (UID: \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\") " pod="openstack/neutron-7ff57884d4-gkgfr" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.453103 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-httpd-config\") pod \"neutron-7ff57884d4-gkgfr\" (UID: \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\") " pod="openstack/neutron-7ff57884d4-gkgfr" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.453172 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-config\") pod \"neutron-7ff57884d4-gkgfr\" (UID: \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\") " pod="openstack/neutron-7ff57884d4-gkgfr" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.453418 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-ovndb-tls-certs\") pod \"neutron-7ff57884d4-gkgfr\" (UID: \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\") " pod="openstack/neutron-7ff57884d4-gkgfr" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.453471 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvmkz\" (UniqueName: \"kubernetes.io/projected/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-kube-api-access-xvmkz\") pod \"neutron-7ff57884d4-gkgfr\" (UID: \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\") " pod="openstack/neutron-7ff57884d4-gkgfr" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.457151 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-httpd-config\") pod \"neutron-7ff57884d4-gkgfr\" (UID: \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\") " pod="openstack/neutron-7ff57884d4-gkgfr" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.460196 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-combined-ca-bundle\") pod \"neutron-7ff57884d4-gkgfr\" (UID: \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\") " pod="openstack/neutron-7ff57884d4-gkgfr" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.462946 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-config\") pod \"neutron-7ff57884d4-gkgfr\" (UID: \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\") " pod="openstack/neutron-7ff57884d4-gkgfr" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.463085 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-ovndb-tls-certs\") pod \"neutron-7ff57884d4-gkgfr\" (UID: \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\") " pod="openstack/neutron-7ff57884d4-gkgfr" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.475466 4838 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-xvmkz\" (UniqueName: \"kubernetes.io/projected/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-kube-api-access-xvmkz\") pod \"neutron-7ff57884d4-gkgfr\" (UID: \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\") " pod="openstack/neutron-7ff57884d4-gkgfr" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.668060 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7f5f5f8b64-f2wff"] Nov 28 10:17:05 crc kubenswrapper[4838]: W1128 10:17:05.680181 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf4ad309b_1078_40e9_abd7_d1b476971fce.slice/crio-c1b415ad4be7af6732e24033b0a598e74a572cc37e954c7cb00b4116721bca36 WatchSource:0}: Error finding container c1b415ad4be7af6732e24033b0a598e74a572cc37e954c7cb00b4116721bca36: Status 404 returned error can't find the container with id c1b415ad4be7af6732e24033b0a598e74a572cc37e954c7cb00b4116721bca36 Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.684053 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.693761 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7ff57884d4-gkgfr" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.776901 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7f5f5f8b64-f2wff" event={"ID":"f4ad309b-1078-40e9-abd7-d1b476971fce","Type":"ContainerStarted","Data":"c1b415ad4be7af6732e24033b0a598e74a572cc37e954c7cb00b4116721bca36"} Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.782246 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-699df9757c-dvp4f" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.793731 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-699df9757c-dvp4f" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.816280 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6669b75dd9-q6nlg"] Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.858352 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-dns-svc\") pod \"72b3211c-56cf-4987-9a43-2021dea17799\" (UID: \"72b3211c-56cf-4987-9a43-2021dea17799\") " Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.858447 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mlknw\" (UniqueName: \"kubernetes.io/projected/72b3211c-56cf-4987-9a43-2021dea17799-kube-api-access-mlknw\") pod \"72b3211c-56cf-4987-9a43-2021dea17799\" (UID: \"72b3211c-56cf-4987-9a43-2021dea17799\") " Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.858506 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-ovsdbserver-sb\") pod \"72b3211c-56cf-4987-9a43-2021dea17799\" (UID: \"72b3211c-56cf-4987-9a43-2021dea17799\") " Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.858524 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-ovsdbserver-nb\") pod \"72b3211c-56cf-4987-9a43-2021dea17799\" (UID: \"72b3211c-56cf-4987-9a43-2021dea17799\") " Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.858613 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-config\") pod \"72b3211c-56cf-4987-9a43-2021dea17799\" (UID: \"72b3211c-56cf-4987-9a43-2021dea17799\") " Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.859401 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-config" (OuterVolumeSpecName: "config") pod "72b3211c-56cf-4987-9a43-2021dea17799" (UID: "72b3211c-56cf-4987-9a43-2021dea17799"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.859698 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "72b3211c-56cf-4987-9a43-2021dea17799" (UID: "72b3211c-56cf-4987-9a43-2021dea17799"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.860035 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "72b3211c-56cf-4987-9a43-2021dea17799" (UID: "72b3211c-56cf-4987-9a43-2021dea17799"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.860135 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "72b3211c-56cf-4987-9a43-2021dea17799" (UID: "72b3211c-56cf-4987-9a43-2021dea17799"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:17:05 crc kubenswrapper[4838]: W1128 10:17:05.868802 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod39e38efd_bd92_419d_90e8_f6630032e7d7.slice/crio-b6d665293c5b5e8d7fb79d7792034f710dc3fd535b38694b111d6a9767f7523e WatchSource:0}: Error finding container b6d665293c5b5e8d7fb79d7792034f710dc3fd535b38694b111d6a9767f7523e: Status 404 returned error can't find the container with id b6d665293c5b5e8d7fb79d7792034f710dc3fd535b38694b111d6a9767f7523e Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.869608 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72b3211c-56cf-4987-9a43-2021dea17799-kube-api-access-mlknw" (OuterVolumeSpecName: "kube-api-access-mlknw") pod "72b3211c-56cf-4987-9a43-2021dea17799" (UID: "72b3211c-56cf-4987-9a43-2021dea17799"). InnerVolumeSpecName "kube-api-access-mlknw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.962799 4838 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.963817 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mlknw\" (UniqueName: \"kubernetes.io/projected/72b3211c-56cf-4987-9a43-2021dea17799-kube-api-access-mlknw\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.963829 4838 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.963837 4838 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.963846 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72b3211c-56cf-4987-9a43-2021dea17799-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:05 crc kubenswrapper[4838]: I1128 10:17:05.973299 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-698bf66db7-q4nv6"] Nov 28 10:17:05 crc kubenswrapper[4838]: W1128 10:17:05.979826 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4d77c8cd_d0c5_4bb9_84a3_e3a00f7c9a99.slice/crio-bdd9dbb849ed666df6a5daaea66fbebcce9da47cfc6ef352fc39a843f5a75270 WatchSource:0}: Error finding container bdd9dbb849ed666df6a5daaea66fbebcce9da47cfc6ef352fc39a843f5a75270: Status 404 returned error can't find the container with id bdd9dbb849ed666df6a5daaea66fbebcce9da47cfc6ef352fc39a843f5a75270 Nov 28 10:17:05 crc 
kubenswrapper[4838]: I1128 10:17:05.996440 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-65687b7854-r7rh5"] Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.028274 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-9858c78b6-nx8kz"] Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.300153 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb684768f-7wcp7"] Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.350468 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7ff57884d4-gkgfr"] Nov 28 10:17:06 crc kubenswrapper[4838]: W1128 10:17:06.367822 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9f301ff0_c619_4fb2_a1e6_a7254b39e13f.slice/crio-4cd0dd8b7ec72a9fcf0acced6524b901d54c912bd568230b16cc6540a24fc225 WatchSource:0}: Error finding container 4cd0dd8b7ec72a9fcf0acced6524b901d54c912bd568230b16cc6540a24fc225: Status 404 returned error can't find the container with id 4cd0dd8b7ec72a9fcf0acced6524b901d54c912bd568230b16cc6540a24fc225 Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.798445 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7ff57884d4-gkgfr" event={"ID":"9f301ff0-c619-4fb2-a1e6-a7254b39e13f","Type":"ContainerStarted","Data":"749dc59908ae7ebd5eaf5f716fe0187be34fc2e861977219fa3340212d7b49c3"} Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.798495 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7ff57884d4-gkgfr" event={"ID":"9f301ff0-c619-4fb2-a1e6-a7254b39e13f","Type":"ContainerStarted","Data":"4cd0dd8b7ec72a9fcf0acced6524b901d54c912bd568230b16cc6540a24fc225"} Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.803440 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7f5f5f8b64-f2wff" event={"ID":"f4ad309b-1078-40e9-abd7-d1b476971fce","Type":"ContainerStarted","Data":"c3da58df92a736e89483c81a7bef7d47f8069f15ae7ce0f985a11822ad513250"} Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.803461 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7f5f5f8b64-f2wff" event={"ID":"f4ad309b-1078-40e9-abd7-d1b476971fce","Type":"ContainerStarted","Data":"0d7780b6645b485e00db92b29ce57b361a83fcbb2799e9ac89e30c075e738494"} Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.803585 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.803610 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-7f5f5f8b64-f2wff" Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.804812 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-65687b7854-r7rh5" event={"ID":"de6a5f4a-30c6-4f42-88e7-3f113c1ed53b","Type":"ContainerStarted","Data":"9321244d82c272233a3cbd3e851ad18ae49bb97a25168d5a4e1163984ebb59ea"} Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.805747 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6669b75dd9-q6nlg" event={"ID":"39e38efd-bd92-419d-90e8-f6630032e7d7","Type":"ContainerStarted","Data":"b6d665293c5b5e8d7fb79d7792034f710dc3fd535b38694b111d6a9767f7523e"} Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.807840 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/keystone-698bf66db7-q4nv6" event={"ID":"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99","Type":"ContainerStarted","Data":"9404d5028016269e499e4174ffdf61250632e00e5df0d0c86979ac52d405a6cc"} Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.807866 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-698bf66db7-q4nv6" Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.807879 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-698bf66db7-q4nv6" event={"ID":"4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99","Type":"ContainerStarted","Data":"bdd9dbb849ed666df6a5daaea66fbebcce9da47cfc6ef352fc39a843f5a75270"} Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.810615 4838 generic.go:334] "Generic (PLEG): container finished" podID="3eeec0e8-8302-414a-882e-22921c0e0872" containerID="3f5fccaa14838fdba68d88ed92ddaf340356abd833791ad2ab25ec2717cfdbc5" exitCode=0 Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.810681 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" event={"ID":"3eeec0e8-8302-414a-882e-22921c0e0872","Type":"ContainerDied","Data":"3f5fccaa14838fdba68d88ed92ddaf340356abd833791ad2ab25ec2717cfdbc5"} Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.810702 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" event={"ID":"3eeec0e8-8302-414a-882e-22921c0e0872","Type":"ContainerStarted","Data":"d08b2a9a018ecd7a6e01e3627fa83ab707af0f5b6c29645d61c131e9cfc4fe8a"} Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.815263 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-699df9757c-dvp4f" Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.816212 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-9858c78b6-nx8kz" event={"ID":"57292635-e413-4511-830d-e536d3c8e398","Type":"ContainerStarted","Data":"0f10fd058a7159094f3c16ee8be6385fb9c4e4143be3a680b9797753573f813f"} Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.816240 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-9858c78b6-nx8kz" Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.816251 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-9858c78b6-nx8kz" event={"ID":"57292635-e413-4511-830d-e536d3c8e398","Type":"ContainerStarted","Data":"6638064eb253ae73f3b35f7ffb0d337ccd3adc48c6ed020f4b2423ecaf580cf6"} Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.816261 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-9858c78b6-nx8kz" event={"ID":"57292635-e413-4511-830d-e536d3c8e398","Type":"ContainerStarted","Data":"839f89ab60e7cdf19c5baa76c6b40085c74c504b1fce2f0100ef41804c6eaf7d"} Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.816281 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-9858c78b6-nx8kz" Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.853115 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-7f5f5f8b64-f2wff" podStartSLOduration=2.8530958379999998 podStartE2EDuration="2.853095838s" podCreationTimestamp="2025-11-28 10:17:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:17:06.843505109 +0000 UTC m=+1198.542479289" 
watchObservedRunningTime="2025-11-28 10:17:06.853095838 +0000 UTC m=+1198.552070008" Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.883983 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-9858c78b6-nx8kz" podStartSLOduration=2.88396018 podStartE2EDuration="2.88396018s" podCreationTimestamp="2025-11-28 10:17:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:17:06.870502417 +0000 UTC m=+1198.569476587" watchObservedRunningTime="2025-11-28 10:17:06.88396018 +0000 UTC m=+1198.582934350" Nov 28 10:17:06 crc kubenswrapper[4838]: I1128 10:17:06.943839 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-698bf66db7-q4nv6" podStartSLOduration=2.943810092 podStartE2EDuration="2.943810092s" podCreationTimestamp="2025-11-28 10:17:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:17:06.896484777 +0000 UTC m=+1198.595458967" watchObservedRunningTime="2025-11-28 10:17:06.943810092 +0000 UTC m=+1198.642784262" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.064833 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-699df9757c-dvp4f"] Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.076794 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-699df9757c-dvp4f"] Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.088467 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-69bc8cb85-2qbr6"] Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.090093 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.097461 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.097853 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.113074 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-69bc8cb85-2qbr6"] Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.322605 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f9ddfd3-3f45-40e8-a9f8-2976dd20280f-ovndb-tls-certs\") pod \"neutron-69bc8cb85-2qbr6\" (UID: \"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f\") " pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.322663 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdtps\" (UniqueName: \"kubernetes.io/projected/5f9ddfd3-3f45-40e8-a9f8-2976dd20280f-kube-api-access-xdtps\") pod \"neutron-69bc8cb85-2qbr6\" (UID: \"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f\") " pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.322689 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f9ddfd3-3f45-40e8-a9f8-2976dd20280f-public-tls-certs\") pod \"neutron-69bc8cb85-2qbr6\" (UID: \"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f\") " pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.322710 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f9ddfd3-3f45-40e8-a9f8-2976dd20280f-internal-tls-certs\") pod \"neutron-69bc8cb85-2qbr6\" (UID: \"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f\") " pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.322784 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f9ddfd3-3f45-40e8-a9f8-2976dd20280f-combined-ca-bundle\") pod \"neutron-69bc8cb85-2qbr6\" (UID: \"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f\") " pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.322809 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/5f9ddfd3-3f45-40e8-a9f8-2976dd20280f-httpd-config\") pod \"neutron-69bc8cb85-2qbr6\" (UID: \"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f\") " pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.322823 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5f9ddfd3-3f45-40e8-a9f8-2976dd20280f-config\") pod \"neutron-69bc8cb85-2qbr6\" (UID: \"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f\") " pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.426268 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/5f9ddfd3-3f45-40e8-a9f8-2976dd20280f-ovndb-tls-certs\") pod \"neutron-69bc8cb85-2qbr6\" (UID: \"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f\") " pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.426371 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdtps\" (UniqueName: \"kubernetes.io/projected/5f9ddfd3-3f45-40e8-a9f8-2976dd20280f-kube-api-access-xdtps\") pod \"neutron-69bc8cb85-2qbr6\" (UID: \"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f\") " pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.426401 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f9ddfd3-3f45-40e8-a9f8-2976dd20280f-public-tls-certs\") pod \"neutron-69bc8cb85-2qbr6\" (UID: \"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f\") " pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.426431 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f9ddfd3-3f45-40e8-a9f8-2976dd20280f-internal-tls-certs\") pod \"neutron-69bc8cb85-2qbr6\" (UID: \"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f\") " pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.426518 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f9ddfd3-3f45-40e8-a9f8-2976dd20280f-combined-ca-bundle\") pod \"neutron-69bc8cb85-2qbr6\" (UID: \"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f\") " pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.426560 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/5f9ddfd3-3f45-40e8-a9f8-2976dd20280f-httpd-config\") pod \"neutron-69bc8cb85-2qbr6\" (UID: \"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f\") " pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.426586 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5f9ddfd3-3f45-40e8-a9f8-2976dd20280f-config\") pod \"neutron-69bc8cb85-2qbr6\" (UID: \"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f\") " pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.441128 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f9ddfd3-3f45-40e8-a9f8-2976dd20280f-combined-ca-bundle\") pod \"neutron-69bc8cb85-2qbr6\" (UID: \"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f\") " pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.441543 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f9ddfd3-3f45-40e8-a9f8-2976dd20280f-internal-tls-certs\") pod \"neutron-69bc8cb85-2qbr6\" (UID: \"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f\") " pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.442812 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f9ddfd3-3f45-40e8-a9f8-2976dd20280f-ovndb-tls-certs\") pod \"neutron-69bc8cb85-2qbr6\" (UID: \"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f\") 
" pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.442862 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f9ddfd3-3f45-40e8-a9f8-2976dd20280f-public-tls-certs\") pod \"neutron-69bc8cb85-2qbr6\" (UID: \"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f\") " pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.444113 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/5f9ddfd3-3f45-40e8-a9f8-2976dd20280f-httpd-config\") pod \"neutron-69bc8cb85-2qbr6\" (UID: \"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f\") " pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.445391 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/5f9ddfd3-3f45-40e8-a9f8-2976dd20280f-config\") pod \"neutron-69bc8cb85-2qbr6\" (UID: \"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f\") " pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.448226 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdtps\" (UniqueName: \"kubernetes.io/projected/5f9ddfd3-3f45-40e8-a9f8-2976dd20280f-kube-api-access-xdtps\") pod \"neutron-69bc8cb85-2qbr6\" (UID: \"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f\") " pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.719932 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.825457 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" event={"ID":"3eeec0e8-8302-414a-882e-22921c0e0872","Type":"ContainerStarted","Data":"0c28e94f4de20669a1e88d69a499de09cf4bd337bc0e20dd1df90eda44fc4112"} Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.827311 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.830669 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7ff57884d4-gkgfr" event={"ID":"9f301ff0-c619-4fb2-a1e6-a7254b39e13f","Type":"ContainerStarted","Data":"eca3a5e3d95690fe050322e7d54999af48228e9f74303f5d000b82c3f7809f7f"} Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.830708 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7ff57884d4-gkgfr" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.850978 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" podStartSLOduration=2.8509561310000002 podStartE2EDuration="2.850956131s" podCreationTimestamp="2025-11-28 10:17:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:17:07.845801361 +0000 UTC m=+1199.544775551" watchObservedRunningTime="2025-11-28 10:17:07.850956131 +0000 UTC m=+1199.549930301" Nov 28 10:17:07 crc kubenswrapper[4838]: I1128 10:17:07.873552 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7ff57884d4-gkgfr" podStartSLOduration=2.873526939 podStartE2EDuration="2.873526939s" podCreationTimestamp="2025-11-28 10:17:05 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:17:07.865169734 +0000 UTC m=+1199.564143924" watchObservedRunningTime="2025-11-28 10:17:07.873526939 +0000 UTC m=+1199.572501109" Nov 28 10:17:08 crc kubenswrapper[4838]: I1128 10:17:08.595815 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72b3211c-56cf-4987-9a43-2021dea17799" path="/var/lib/kubelet/pods/72b3211c-56cf-4987-9a43-2021dea17799/volumes" Nov 28 10:17:09 crc kubenswrapper[4838]: I1128 10:17:08.853496 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-65687b7854-r7rh5" event={"ID":"de6a5f4a-30c6-4f42-88e7-3f113c1ed53b","Type":"ContainerStarted","Data":"48004b199a1b0278de5e37aa0ef9d06bb7248b12f947f6044aaf086bebe34933"} Nov 28 10:17:09 crc kubenswrapper[4838]: I1128 10:17:08.857065 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6669b75dd9-q6nlg" event={"ID":"39e38efd-bd92-419d-90e8-f6630032e7d7","Type":"ContainerStarted","Data":"9819a3f7044e40dcecefa7753dcc67858bdad2a65e087a0e9fc897a95ac4503a"} Nov 28 10:17:09 crc kubenswrapper[4838]: I1128 10:17:09.807043 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-69bc8cb85-2qbr6"] Nov 28 10:17:09 crc kubenswrapper[4838]: I1128 10:17:09.869362 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-65687b7854-r7rh5" event={"ID":"de6a5f4a-30c6-4f42-88e7-3f113c1ed53b","Type":"ContainerStarted","Data":"84bc7d18b226528658c59aade3b6584f0be8999649538fee39201d8c8ff44351"} Nov 28 10:17:09 crc kubenswrapper[4838]: I1128 10:17:09.873213 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6669b75dd9-q6nlg" event={"ID":"39e38efd-bd92-419d-90e8-f6630032e7d7","Type":"ContainerStarted","Data":"6a71c282c00d9be24209d832059c0bbaebbf7d8183cb2e26adc6e69668898996"} Nov 28 10:17:09 crc kubenswrapper[4838]: I1128 10:17:09.901743 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-65687b7854-r7rh5" podStartSLOduration=3.423689115 podStartE2EDuration="5.901703339s" podCreationTimestamp="2025-11-28 10:17:04 +0000 UTC" firstStartedPulling="2025-11-28 10:17:05.972876505 +0000 UTC m=+1197.671850675" lastFinishedPulling="2025-11-28 10:17:08.450890729 +0000 UTC m=+1200.149864899" observedRunningTime="2025-11-28 10:17:09.890050365 +0000 UTC m=+1201.589024525" watchObservedRunningTime="2025-11-28 10:17:09.901703339 +0000 UTC m=+1201.600677509" Nov 28 10:17:09 crc kubenswrapper[4838]: I1128 10:17:09.920702 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-6669b75dd9-q6nlg" podStartSLOduration=3.372537886 podStartE2EDuration="5.92068132s" podCreationTimestamp="2025-11-28 10:17:04 +0000 UTC" firstStartedPulling="2025-11-28 10:17:05.884433942 +0000 UTC m=+1197.583408102" lastFinishedPulling="2025-11-28 10:17:08.432577366 +0000 UTC m=+1200.131551536" observedRunningTime="2025-11-28 10:17:09.919243521 +0000 UTC m=+1201.618217691" watchObservedRunningTime="2025-11-28 10:17:09.92068132 +0000 UTC m=+1201.619655490" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.443934 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6dfcbd5794-mx784"] Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.445743 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.447882 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.448371 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.470505 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6dfcbd5794-mx784"] Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.600805 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rz7s4\" (UniqueName: \"kubernetes.io/projected/54957e0e-0d82-418e-9786-612dd3d121f0-kube-api-access-rz7s4\") pod \"barbican-api-6dfcbd5794-mx784\" (UID: \"54957e0e-0d82-418e-9786-612dd3d121f0\") " pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.600862 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/54957e0e-0d82-418e-9786-612dd3d121f0-public-tls-certs\") pod \"barbican-api-6dfcbd5794-mx784\" (UID: \"54957e0e-0d82-418e-9786-612dd3d121f0\") " pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.600888 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/54957e0e-0d82-418e-9786-612dd3d121f0-internal-tls-certs\") pod \"barbican-api-6dfcbd5794-mx784\" (UID: \"54957e0e-0d82-418e-9786-612dd3d121f0\") " pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.600988 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/54957e0e-0d82-418e-9786-612dd3d121f0-config-data-custom\") pod \"barbican-api-6dfcbd5794-mx784\" (UID: \"54957e0e-0d82-418e-9786-612dd3d121f0\") " pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.601029 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54957e0e-0d82-418e-9786-612dd3d121f0-config-data\") pod \"barbican-api-6dfcbd5794-mx784\" (UID: \"54957e0e-0d82-418e-9786-612dd3d121f0\") " pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.601056 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54957e0e-0d82-418e-9786-612dd3d121f0-combined-ca-bundle\") pod \"barbican-api-6dfcbd5794-mx784\" (UID: \"54957e0e-0d82-418e-9786-612dd3d121f0\") " pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.601103 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/54957e0e-0d82-418e-9786-612dd3d121f0-logs\") pod \"barbican-api-6dfcbd5794-mx784\" (UID: \"54957e0e-0d82-418e-9786-612dd3d121f0\") " pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.704708 4838 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54957e0e-0d82-418e-9786-612dd3d121f0-config-data\") pod \"barbican-api-6dfcbd5794-mx784\" (UID: \"54957e0e-0d82-418e-9786-612dd3d121f0\") " pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.704785 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54957e0e-0d82-418e-9786-612dd3d121f0-combined-ca-bundle\") pod \"barbican-api-6dfcbd5794-mx784\" (UID: \"54957e0e-0d82-418e-9786-612dd3d121f0\") " pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.704832 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/54957e0e-0d82-418e-9786-612dd3d121f0-logs\") pod \"barbican-api-6dfcbd5794-mx784\" (UID: \"54957e0e-0d82-418e-9786-612dd3d121f0\") " pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.704893 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rz7s4\" (UniqueName: \"kubernetes.io/projected/54957e0e-0d82-418e-9786-612dd3d121f0-kube-api-access-rz7s4\") pod \"barbican-api-6dfcbd5794-mx784\" (UID: \"54957e0e-0d82-418e-9786-612dd3d121f0\") " pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.704947 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/54957e0e-0d82-418e-9786-612dd3d121f0-public-tls-certs\") pod \"barbican-api-6dfcbd5794-mx784\" (UID: \"54957e0e-0d82-418e-9786-612dd3d121f0\") " pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.704977 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/54957e0e-0d82-418e-9786-612dd3d121f0-internal-tls-certs\") pod \"barbican-api-6dfcbd5794-mx784\" (UID: \"54957e0e-0d82-418e-9786-612dd3d121f0\") " pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.705162 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/54957e0e-0d82-418e-9786-612dd3d121f0-config-data-custom\") pod \"barbican-api-6dfcbd5794-mx784\" (UID: \"54957e0e-0d82-418e-9786-612dd3d121f0\") " pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.707234 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/54957e0e-0d82-418e-9786-612dd3d121f0-logs\") pod \"barbican-api-6dfcbd5794-mx784\" (UID: \"54957e0e-0d82-418e-9786-612dd3d121f0\") " pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.712290 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54957e0e-0d82-418e-9786-612dd3d121f0-combined-ca-bundle\") pod \"barbican-api-6dfcbd5794-mx784\" (UID: \"54957e0e-0d82-418e-9786-612dd3d121f0\") " pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.713596 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/54957e0e-0d82-418e-9786-612dd3d121f0-config-data\") pod \"barbican-api-6dfcbd5794-mx784\" (UID: \"54957e0e-0d82-418e-9786-612dd3d121f0\") " pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.715067 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/54957e0e-0d82-418e-9786-612dd3d121f0-internal-tls-certs\") pod \"barbican-api-6dfcbd5794-mx784\" (UID: \"54957e0e-0d82-418e-9786-612dd3d121f0\") " pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.722189 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/54957e0e-0d82-418e-9786-612dd3d121f0-config-data-custom\") pod \"barbican-api-6dfcbd5794-mx784\" (UID: \"54957e0e-0d82-418e-9786-612dd3d121f0\") " pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.722623 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/54957e0e-0d82-418e-9786-612dd3d121f0-public-tls-certs\") pod \"barbican-api-6dfcbd5794-mx784\" (UID: \"54957e0e-0d82-418e-9786-612dd3d121f0\") " pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.737282 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rz7s4\" (UniqueName: \"kubernetes.io/projected/54957e0e-0d82-418e-9786-612dd3d121f0-kube-api-access-rz7s4\") pod \"barbican-api-6dfcbd5794-mx784\" (UID: \"54957e0e-0d82-418e-9786-612dd3d121f0\") " pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:11 crc kubenswrapper[4838]: I1128 10:17:11.775376 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:12 crc kubenswrapper[4838]: I1128 10:17:12.904171 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-69bc8cb85-2qbr6" event={"ID":"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f","Type":"ContainerStarted","Data":"5571dedbca500d9766adfec02530a2b7240424523e16890454928399519eecce"} Nov 28 10:17:14 crc kubenswrapper[4838]: I1128 10:17:14.661544 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6dfcbd5794-mx784"] Nov 28 10:17:14 crc kubenswrapper[4838]: I1128 10:17:14.929736 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6dfcbd5794-mx784" event={"ID":"54957e0e-0d82-418e-9786-612dd3d121f0","Type":"ContainerStarted","Data":"70ede478c09c6e7e018fe12f524c9c1fdde45c9802b69caee984c15405d83533"} Nov 28 10:17:14 crc kubenswrapper[4838]: I1128 10:17:14.929989 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6dfcbd5794-mx784" event={"ID":"54957e0e-0d82-418e-9786-612dd3d121f0","Type":"ContainerStarted","Data":"9f1750d19ade13eae234e7d1b7108b8889d20085f80a2fead198382ef8a742b1"} Nov 28 10:17:14 crc kubenswrapper[4838]: I1128 10:17:14.932114 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6bd887d-04f5-45c0-b831-1d16262bbf08","Type":"ContainerStarted","Data":"ddaa88ff7bbc8330594069ff5823b40ac34e7124e3862d3ae386b481a293ca7a"} Nov 28 10:17:14 crc kubenswrapper[4838]: I1128 10:17:14.932422 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 10:17:14 crc kubenswrapper[4838]: I1128 10:17:14.932661 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b6bd887d-04f5-45c0-b831-1d16262bbf08" containerName="ceilometer-central-agent" containerID="cri-o://3f39291203323511b7dacbb127e1410ad584d8fd37be3292550a3d7cc5ffbf02" gracePeriod=30 Nov 28 10:17:14 crc kubenswrapper[4838]: I1128 10:17:14.932771 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b6bd887d-04f5-45c0-b831-1d16262bbf08" containerName="sg-core" containerID="cri-o://fb6e5792d865ee441936a93349d3a59c49f77d75278e9c2efaac73c96d8c90e2" gracePeriod=30 Nov 28 10:17:14 crc kubenswrapper[4838]: I1128 10:17:14.932817 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b6bd887d-04f5-45c0-b831-1d16262bbf08" containerName="ceilometer-notification-agent" containerID="cri-o://37183545c8c3a969307f440966afa2391add66012dc772d301476a2e5b8bcde8" gracePeriod=30 Nov 28 10:17:14 crc kubenswrapper[4838]: I1128 10:17:14.932654 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b6bd887d-04f5-45c0-b831-1d16262bbf08" containerName="proxy-httpd" containerID="cri-o://ddaa88ff7bbc8330594069ff5823b40ac34e7124e3862d3ae386b481a293ca7a" gracePeriod=30 Nov 28 10:17:14 crc kubenswrapper[4838]: I1128 10:17:14.934263 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-69bc8cb85-2qbr6" event={"ID":"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f","Type":"ContainerStarted","Data":"7aa876c891349a1ff15987c7e76adef2f4f8567b7d69cb8908464ac1d30c7e68"} Nov 28 10:17:14 crc kubenswrapper[4838]: I1128 10:17:14.934291 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-69bc8cb85-2qbr6" 
event={"ID":"5f9ddfd3-3f45-40e8-a9f8-2976dd20280f","Type":"ContainerStarted","Data":"5ed92c89e2c4652554dc6b1db83249669d67a0678655281469c2fb0a5b67fb05"} Nov 28 10:17:14 crc kubenswrapper[4838]: I1128 10:17:14.934455 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:14 crc kubenswrapper[4838]: I1128 10:17:14.952918 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.284506322 podStartE2EDuration="42.95290385s" podCreationTimestamp="2025-11-28 10:16:32 +0000 UTC" firstStartedPulling="2025-11-28 10:16:33.714346087 +0000 UTC m=+1165.413320257" lastFinishedPulling="2025-11-28 10:17:14.382743605 +0000 UTC m=+1206.081717785" observedRunningTime="2025-11-28 10:17:14.947385911 +0000 UTC m=+1206.646360081" watchObservedRunningTime="2025-11-28 10:17:14.95290385 +0000 UTC m=+1206.651878020" Nov 28 10:17:14 crc kubenswrapper[4838]: I1128 10:17:14.967822 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-69bc8cb85-2qbr6" podStartSLOduration=7.967784111 podStartE2EDuration="7.967784111s" podCreationTimestamp="2025-11-28 10:17:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:17:14.96477181 +0000 UTC m=+1206.663745980" watchObservedRunningTime="2025-11-28 10:17:14.967784111 +0000 UTC m=+1206.666758281" Nov 28 10:17:15 crc kubenswrapper[4838]: I1128 10:17:15.685709 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" Nov 28 10:17:15 crc kubenswrapper[4838]: I1128 10:17:15.754818 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7987f74bbc-pkpc9"] Nov 28 10:17:15 crc kubenswrapper[4838]: I1128 10:17:15.755163 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" podUID="1b11db77-4c89-43b5-b2c4-f72e865025b3" containerName="dnsmasq-dns" containerID="cri-o://8de83020bee197333d0589834e31bc771f5f9c952b02e750d44ce828a4a1f3c7" gracePeriod=10 Nov 28 10:17:15 crc kubenswrapper[4838]: I1128 10:17:15.957696 4838 generic.go:334] "Generic (PLEG): container finished" podID="1b11db77-4c89-43b5-b2c4-f72e865025b3" containerID="8de83020bee197333d0589834e31bc771f5f9c952b02e750d44ce828a4a1f3c7" exitCode=0 Nov 28 10:17:15 crc kubenswrapper[4838]: I1128 10:17:15.957768 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" event={"ID":"1b11db77-4c89-43b5-b2c4-f72e865025b3","Type":"ContainerDied","Data":"8de83020bee197333d0589834e31bc771f5f9c952b02e750d44ce828a4a1f3c7"} Nov 28 10:17:15 crc kubenswrapper[4838]: I1128 10:17:15.973158 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6dfcbd5794-mx784" event={"ID":"54957e0e-0d82-418e-9786-612dd3d121f0","Type":"ContainerStarted","Data":"3b1c1287c77b67b3bb5540e255c7859f9820aedd8dca5617287875bd950fd45e"} Nov 28 10:17:15 crc kubenswrapper[4838]: I1128 10:17:15.973701 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:15 crc kubenswrapper[4838]: I1128 10:17:15.973775 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6dfcbd5794-mx784" Nov 28 10:17:15 crc kubenswrapper[4838]: I1128 10:17:15.990038 4838 generic.go:334] "Generic (PLEG): container 
finished" podID="b6bd887d-04f5-45c0-b831-1d16262bbf08" containerID="ddaa88ff7bbc8330594069ff5823b40ac34e7124e3862d3ae386b481a293ca7a" exitCode=0 Nov 28 10:17:15 crc kubenswrapper[4838]: I1128 10:17:15.990063 4838 generic.go:334] "Generic (PLEG): container finished" podID="b6bd887d-04f5-45c0-b831-1d16262bbf08" containerID="fb6e5792d865ee441936a93349d3a59c49f77d75278e9c2efaac73c96d8c90e2" exitCode=2 Nov 28 10:17:15 crc kubenswrapper[4838]: I1128 10:17:15.990072 4838 generic.go:334] "Generic (PLEG): container finished" podID="b6bd887d-04f5-45c0-b831-1d16262bbf08" containerID="3f39291203323511b7dacbb127e1410ad584d8fd37be3292550a3d7cc5ffbf02" exitCode=0 Nov 28 10:17:15 crc kubenswrapper[4838]: I1128 10:17:15.990114 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6bd887d-04f5-45c0-b831-1d16262bbf08","Type":"ContainerDied","Data":"ddaa88ff7bbc8330594069ff5823b40ac34e7124e3862d3ae386b481a293ca7a"} Nov 28 10:17:15 crc kubenswrapper[4838]: I1128 10:17:15.990138 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6bd887d-04f5-45c0-b831-1d16262bbf08","Type":"ContainerDied","Data":"fb6e5792d865ee441936a93349d3a59c49f77d75278e9c2efaac73c96d8c90e2"} Nov 28 10:17:15 crc kubenswrapper[4838]: I1128 10:17:15.990149 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6bd887d-04f5-45c0-b831-1d16262bbf08","Type":"ContainerDied","Data":"3f39291203323511b7dacbb127e1410ad584d8fd37be3292550a3d7cc5ffbf02"} Nov 28 10:17:15 crc kubenswrapper[4838]: I1128 10:17:15.996456 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6dfcbd5794-mx784" podStartSLOduration=4.996441294 podStartE2EDuration="4.996441294s" podCreationTimestamp="2025-11-28 10:17:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:17:15.994997845 +0000 UTC m=+1207.693972015" watchObservedRunningTime="2025-11-28 10:17:15.996441294 +0000 UTC m=+1207.695415464" Nov 28 10:17:16 crc kubenswrapper[4838]: I1128 10:17:16.009215 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-qdl6f" event={"ID":"d4f1cd16-7995-4964-87d8-ab904bc11ca5","Type":"ContainerStarted","Data":"d7d294e1c7be7a054cb72311f4c8f259dd90aedcf039dcbb12f05bce0acb0190"} Nov 28 10:17:16 crc kubenswrapper[4838]: I1128 10:17:16.028176 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-qdl6f" podStartSLOduration=3.36270954 podStartE2EDuration="44.028158899s" podCreationTimestamp="2025-11-28 10:16:32 +0000 UTC" firstStartedPulling="2025-11-28 10:16:33.720029869 +0000 UTC m=+1165.419004039" lastFinishedPulling="2025-11-28 10:17:14.385479228 +0000 UTC m=+1206.084453398" observedRunningTime="2025-11-28 10:17:16.027520292 +0000 UTC m=+1207.726494462" watchObservedRunningTime="2025-11-28 10:17:16.028158899 +0000 UTC m=+1207.727133069" Nov 28 10:17:16 crc kubenswrapper[4838]: I1128 10:17:16.263787 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" Nov 28 10:17:16 crc kubenswrapper[4838]: I1128 10:17:16.289160 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-dns-svc\") pod \"1b11db77-4c89-43b5-b2c4-f72e865025b3\" (UID: \"1b11db77-4c89-43b5-b2c4-f72e865025b3\") " Nov 28 10:17:16 crc kubenswrapper[4838]: I1128 10:17:16.289226 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hz499\" (UniqueName: \"kubernetes.io/projected/1b11db77-4c89-43b5-b2c4-f72e865025b3-kube-api-access-hz499\") pod \"1b11db77-4c89-43b5-b2c4-f72e865025b3\" (UID: \"1b11db77-4c89-43b5-b2c4-f72e865025b3\") " Nov 28 10:17:16 crc kubenswrapper[4838]: I1128 10:17:16.301174 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b11db77-4c89-43b5-b2c4-f72e865025b3-kube-api-access-hz499" (OuterVolumeSpecName: "kube-api-access-hz499") pod "1b11db77-4c89-43b5-b2c4-f72e865025b3" (UID: "1b11db77-4c89-43b5-b2c4-f72e865025b3"). InnerVolumeSpecName "kube-api-access-hz499". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:17:16 crc kubenswrapper[4838]: I1128 10:17:16.360956 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1b11db77-4c89-43b5-b2c4-f72e865025b3" (UID: "1b11db77-4c89-43b5-b2c4-f72e865025b3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:17:16 crc kubenswrapper[4838]: I1128 10:17:16.390799 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-config\") pod \"1b11db77-4c89-43b5-b2c4-f72e865025b3\" (UID: \"1b11db77-4c89-43b5-b2c4-f72e865025b3\") " Nov 28 10:17:16 crc kubenswrapper[4838]: I1128 10:17:16.390915 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-ovsdbserver-sb\") pod \"1b11db77-4c89-43b5-b2c4-f72e865025b3\" (UID: \"1b11db77-4c89-43b5-b2c4-f72e865025b3\") " Nov 28 10:17:16 crc kubenswrapper[4838]: I1128 10:17:16.391013 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-ovsdbserver-nb\") pod \"1b11db77-4c89-43b5-b2c4-f72e865025b3\" (UID: \"1b11db77-4c89-43b5-b2c4-f72e865025b3\") " Nov 28 10:17:16 crc kubenswrapper[4838]: I1128 10:17:16.391682 4838 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:16 crc kubenswrapper[4838]: I1128 10:17:16.391732 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hz499\" (UniqueName: \"kubernetes.io/projected/1b11db77-4c89-43b5-b2c4-f72e865025b3-kube-api-access-hz499\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:16 crc kubenswrapper[4838]: I1128 10:17:16.434521 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1b11db77-4c89-43b5-b2c4-f72e865025b3" (UID: 
"1b11db77-4c89-43b5-b2c4-f72e865025b3"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:17:16 crc kubenswrapper[4838]: I1128 10:17:16.436094 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-config" (OuterVolumeSpecName: "config") pod "1b11db77-4c89-43b5-b2c4-f72e865025b3" (UID: "1b11db77-4c89-43b5-b2c4-f72e865025b3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:17:16 crc kubenswrapper[4838]: I1128 10:17:16.438568 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1b11db77-4c89-43b5-b2c4-f72e865025b3" (UID: "1b11db77-4c89-43b5-b2c4-f72e865025b3"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:17:16 crc kubenswrapper[4838]: I1128 10:17:16.493777 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:16 crc kubenswrapper[4838]: I1128 10:17:16.493814 4838 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:16 crc kubenswrapper[4838]: I1128 10:17:16.493826 4838 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1b11db77-4c89-43b5-b2c4-f72e865025b3-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:16 crc kubenswrapper[4838]: I1128 10:17:16.954398 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-9858c78b6-nx8kz" Nov 28 10:17:17 crc kubenswrapper[4838]: I1128 10:17:17.017559 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" event={"ID":"1b11db77-4c89-43b5-b2c4-f72e865025b3","Type":"ContainerDied","Data":"353218c1f4abafdf6b075d63fa04ada9a7c8dad93e4081286b26f726e24dcbbb"} Nov 28 10:17:17 crc kubenswrapper[4838]: I1128 10:17:17.018064 4838 scope.go:117] "RemoveContainer" containerID="8de83020bee197333d0589834e31bc771f5f9c952b02e750d44ce828a4a1f3c7" Nov 28 10:17:17 crc kubenswrapper[4838]: I1128 10:17:17.017593 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7987f74bbc-pkpc9" Nov 28 10:17:17 crc kubenswrapper[4838]: I1128 10:17:17.041602 4838 scope.go:117] "RemoveContainer" containerID="64552d8fbe3d2a4c842f6482779271b161e224706b0f2dc959b687afa128fcac" Nov 28 10:17:17 crc kubenswrapper[4838]: I1128 10:17:17.048525 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7987f74bbc-pkpc9"] Nov 28 10:17:17 crc kubenswrapper[4838]: I1128 10:17:17.054427 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7987f74bbc-pkpc9"] Nov 28 10:17:17 crc kubenswrapper[4838]: I1128 10:17:17.112108 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-9858c78b6-nx8kz" Nov 28 10:17:18 crc kubenswrapper[4838]: I1128 10:17:18.579262 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b11db77-4c89-43b5-b2c4-f72e865025b3" path="/var/lib/kubelet/pods/1b11db77-4c89-43b5-b2c4-f72e865025b3/volumes" Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.061539 4838 generic.go:334] "Generic (PLEG): container finished" podID="b6bd887d-04f5-45c0-b831-1d16262bbf08" containerID="37183545c8c3a969307f440966afa2391add66012dc772d301476a2e5b8bcde8" exitCode=0 Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.063524 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6bd887d-04f5-45c0-b831-1d16262bbf08","Type":"ContainerDied","Data":"37183545c8c3a969307f440966afa2391add66012dc772d301476a2e5b8bcde8"} Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.148269 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.246861 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-config-data\") pod \"b6bd887d-04f5-45c0-b831-1d16262bbf08\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.246925 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6bd887d-04f5-45c0-b831-1d16262bbf08-log-httpd\") pod \"b6bd887d-04f5-45c0-b831-1d16262bbf08\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.246984 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-combined-ca-bundle\") pod \"b6bd887d-04f5-45c0-b831-1d16262bbf08\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.247045 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6bd887d-04f5-45c0-b831-1d16262bbf08-run-httpd\") pod \"b6bd887d-04f5-45c0-b831-1d16262bbf08\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.247135 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9jg2s\" (UniqueName: \"kubernetes.io/projected/b6bd887d-04f5-45c0-b831-1d16262bbf08-kube-api-access-9jg2s\") pod \"b6bd887d-04f5-45c0-b831-1d16262bbf08\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 
10:17:19.247250 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-sg-core-conf-yaml\") pod \"b6bd887d-04f5-45c0-b831-1d16262bbf08\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.247286 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-scripts\") pod \"b6bd887d-04f5-45c0-b831-1d16262bbf08\" (UID: \"b6bd887d-04f5-45c0-b831-1d16262bbf08\") " Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.248562 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6bd887d-04f5-45c0-b831-1d16262bbf08-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b6bd887d-04f5-45c0-b831-1d16262bbf08" (UID: "b6bd887d-04f5-45c0-b831-1d16262bbf08"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.248970 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6bd887d-04f5-45c0-b831-1d16262bbf08-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b6bd887d-04f5-45c0-b831-1d16262bbf08" (UID: "b6bd887d-04f5-45c0-b831-1d16262bbf08"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.253490 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-scripts" (OuterVolumeSpecName: "scripts") pod "b6bd887d-04f5-45c0-b831-1d16262bbf08" (UID: "b6bd887d-04f5-45c0-b831-1d16262bbf08"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.253548 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6bd887d-04f5-45c0-b831-1d16262bbf08-kube-api-access-9jg2s" (OuterVolumeSpecName: "kube-api-access-9jg2s") pod "b6bd887d-04f5-45c0-b831-1d16262bbf08" (UID: "b6bd887d-04f5-45c0-b831-1d16262bbf08"). InnerVolumeSpecName "kube-api-access-9jg2s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.274239 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b6bd887d-04f5-45c0-b831-1d16262bbf08" (UID: "b6bd887d-04f5-45c0-b831-1d16262bbf08"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.342338 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b6bd887d-04f5-45c0-b831-1d16262bbf08" (UID: "b6bd887d-04f5-45c0-b831-1d16262bbf08"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.343648 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-config-data" (OuterVolumeSpecName: "config-data") pod "b6bd887d-04f5-45c0-b831-1d16262bbf08" (UID: "b6bd887d-04f5-45c0-b831-1d16262bbf08"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.362394 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.363164 4838 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6bd887d-04f5-45c0-b831-1d16262bbf08-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.363187 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.363201 4838 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b6bd887d-04f5-45c0-b831-1d16262bbf08-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.363212 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9jg2s\" (UniqueName: \"kubernetes.io/projected/b6bd887d-04f5-45c0-b831-1d16262bbf08-kube-api-access-9jg2s\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.363224 4838 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:19 crc kubenswrapper[4838]: I1128 10:17:19.363235 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6bd887d-04f5-45c0-b831-1d16262bbf08-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.080979 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.081014 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b6bd887d-04f5-45c0-b831-1d16262bbf08","Type":"ContainerDied","Data":"de9499d5b3f9216845c1ce6207a026bac8dfba2c9bcb91980ec5e7ff1ecd636e"} Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.081141 4838 scope.go:117] "RemoveContainer" containerID="ddaa88ff7bbc8330594069ff5823b40ac34e7124e3862d3ae386b481a293ca7a" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.085114 4838 generic.go:334] "Generic (PLEG): container finished" podID="d4f1cd16-7995-4964-87d8-ab904bc11ca5" containerID="d7d294e1c7be7a054cb72311f4c8f259dd90aedcf039dcbb12f05bce0acb0190" exitCode=0 Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.085170 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-qdl6f" event={"ID":"d4f1cd16-7995-4964-87d8-ab904bc11ca5","Type":"ContainerDied","Data":"d7d294e1c7be7a054cb72311f4c8f259dd90aedcf039dcbb12f05bce0acb0190"} Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.136799 4838 scope.go:117] "RemoveContainer" containerID="fb6e5792d865ee441936a93349d3a59c49f77d75278e9c2efaac73c96d8c90e2" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.143680 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.158419 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.176980 4838 scope.go:117] "RemoveContainer" containerID="37183545c8c3a969307f440966afa2391add66012dc772d301476a2e5b8bcde8" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.203695 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:17:20 crc kubenswrapper[4838]: E1128 10:17:20.204265 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b11db77-4c89-43b5-b2c4-f72e865025b3" containerName="dnsmasq-dns" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.204289 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b11db77-4c89-43b5-b2c4-f72e865025b3" containerName="dnsmasq-dns" Nov 28 10:17:20 crc kubenswrapper[4838]: E1128 10:17:20.204308 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b11db77-4c89-43b5-b2c4-f72e865025b3" containerName="init" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.204320 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b11db77-4c89-43b5-b2c4-f72e865025b3" containerName="init" Nov 28 10:17:20 crc kubenswrapper[4838]: E1128 10:17:20.204355 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6bd887d-04f5-45c0-b831-1d16262bbf08" containerName="ceilometer-central-agent" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.204366 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6bd887d-04f5-45c0-b831-1d16262bbf08" containerName="ceilometer-central-agent" Nov 28 10:17:20 crc kubenswrapper[4838]: E1128 10:17:20.204388 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6bd887d-04f5-45c0-b831-1d16262bbf08" containerName="proxy-httpd" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.204399 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6bd887d-04f5-45c0-b831-1d16262bbf08" containerName="proxy-httpd" Nov 28 10:17:20 crc kubenswrapper[4838]: E1128 10:17:20.204422 4838 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="b6bd887d-04f5-45c0-b831-1d16262bbf08" containerName="sg-core" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.204432 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6bd887d-04f5-45c0-b831-1d16262bbf08" containerName="sg-core" Nov 28 10:17:20 crc kubenswrapper[4838]: E1128 10:17:20.204453 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6bd887d-04f5-45c0-b831-1d16262bbf08" containerName="ceilometer-notification-agent" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.204462 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6bd887d-04f5-45c0-b831-1d16262bbf08" containerName="ceilometer-notification-agent" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.205069 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b11db77-4c89-43b5-b2c4-f72e865025b3" containerName="dnsmasq-dns" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.205108 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6bd887d-04f5-45c0-b831-1d16262bbf08" containerName="proxy-httpd" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.205132 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6bd887d-04f5-45c0-b831-1d16262bbf08" containerName="sg-core" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.205156 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6bd887d-04f5-45c0-b831-1d16262bbf08" containerName="ceilometer-central-agent" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.205173 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6bd887d-04f5-45c0-b831-1d16262bbf08" containerName="ceilometer-notification-agent" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.207658 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.212913 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.212953 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.217118 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.220359 4838 scope.go:117] "RemoveContainer" containerID="3f39291203323511b7dacbb127e1410ad584d8fd37be3292550a3d7cc5ffbf02" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.381271 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5144a764-e665-486c-ba49-02edbd12cf0b-log-httpd\") pod \"ceilometer-0\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.381343 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.381399 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.381425 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5144a764-e665-486c-ba49-02edbd12cf0b-run-httpd\") pod \"ceilometer-0\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.381445 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-config-data\") pod \"ceilometer-0\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.381528 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-scripts\") pod \"ceilometer-0\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.381578 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zd9n9\" (UniqueName: \"kubernetes.io/projected/5144a764-e665-486c-ba49-02edbd12cf0b-kube-api-access-zd9n9\") pod \"ceilometer-0\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.483839 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/5144a764-e665-486c-ba49-02edbd12cf0b-log-httpd\") pod \"ceilometer-0\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.483976 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.484065 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.484107 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5144a764-e665-486c-ba49-02edbd12cf0b-run-httpd\") pod \"ceilometer-0\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.484140 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-config-data\") pod \"ceilometer-0\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.484192 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-scripts\") pod \"ceilometer-0\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.484267 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zd9n9\" (UniqueName: \"kubernetes.io/projected/5144a764-e665-486c-ba49-02edbd12cf0b-kube-api-access-zd9n9\") pod \"ceilometer-0\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.484503 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5144a764-e665-486c-ba49-02edbd12cf0b-log-httpd\") pod \"ceilometer-0\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.484859 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5144a764-e665-486c-ba49-02edbd12cf0b-run-httpd\") pod \"ceilometer-0\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.491434 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-scripts\") pod \"ceilometer-0\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.492483 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-config-data\") 
pod \"ceilometer-0\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.492828 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.499633 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.516387 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zd9n9\" (UniqueName: \"kubernetes.io/projected/5144a764-e665-486c-ba49-02edbd12cf0b-kube-api-access-zd9n9\") pod \"ceilometer-0\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.546946 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 10:17:20 crc kubenswrapper[4838]: I1128 10:17:20.573158 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6bd887d-04f5-45c0-b831-1d16262bbf08" path="/var/lib/kubelet/pods/b6bd887d-04f5-45c0-b831-1d16262bbf08/volumes" Nov 28 10:17:21 crc kubenswrapper[4838]: I1128 10:17:21.021859 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:17:21 crc kubenswrapper[4838]: W1128 10:17:21.027681 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5144a764_e665_486c_ba49_02edbd12cf0b.slice/crio-779d7fe9319aa15bf86f4147d8ccf2ff684cf743e8a1cabce5046f50ebe45a6b WatchSource:0}: Error finding container 779d7fe9319aa15bf86f4147d8ccf2ff684cf743e8a1cabce5046f50ebe45a6b: Status 404 returned error can't find the container with id 779d7fe9319aa15bf86f4147d8ccf2ff684cf743e8a1cabce5046f50ebe45a6b Nov 28 10:17:21 crc kubenswrapper[4838]: I1128 10:17:21.105605 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5144a764-e665-486c-ba49-02edbd12cf0b","Type":"ContainerStarted","Data":"779d7fe9319aa15bf86f4147d8ccf2ff684cf743e8a1cabce5046f50ebe45a6b"} Nov 28 10:17:21 crc kubenswrapper[4838]: I1128 10:17:21.540313 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-qdl6f" Nov 28 10:17:21 crc kubenswrapper[4838]: I1128 10:17:21.721587 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-db-sync-config-data\") pod \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " Nov 28 10:17:21 crc kubenswrapper[4838]: I1128 10:17:21.722036 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-combined-ca-bundle\") pod \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " Nov 28 10:17:21 crc kubenswrapper[4838]: I1128 10:17:21.722070 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-config-data\") pod \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " Nov 28 10:17:21 crc kubenswrapper[4838]: I1128 10:17:21.722125 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d4f1cd16-7995-4964-87d8-ab904bc11ca5-etc-machine-id\") pod \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " Nov 28 10:17:21 crc kubenswrapper[4838]: I1128 10:17:21.722208 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrpzs\" (UniqueName: \"kubernetes.io/projected/d4f1cd16-7995-4964-87d8-ab904bc11ca5-kube-api-access-lrpzs\") pod \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " Nov 28 10:17:21 crc kubenswrapper[4838]: I1128 10:17:21.722233 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-scripts\") pod \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\" (UID: \"d4f1cd16-7995-4964-87d8-ab904bc11ca5\") " Nov 28 10:17:21 crc kubenswrapper[4838]: I1128 10:17:21.723766 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d4f1cd16-7995-4964-87d8-ab904bc11ca5-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "d4f1cd16-7995-4964-87d8-ab904bc11ca5" (UID: "d4f1cd16-7995-4964-87d8-ab904bc11ca5"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:17:21 crc kubenswrapper[4838]: I1128 10:17:21.727284 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-scripts" (OuterVolumeSpecName: "scripts") pod "d4f1cd16-7995-4964-87d8-ab904bc11ca5" (UID: "d4f1cd16-7995-4964-87d8-ab904bc11ca5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:21 crc kubenswrapper[4838]: I1128 10:17:21.728133 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4f1cd16-7995-4964-87d8-ab904bc11ca5-kube-api-access-lrpzs" (OuterVolumeSpecName: "kube-api-access-lrpzs") pod "d4f1cd16-7995-4964-87d8-ab904bc11ca5" (UID: "d4f1cd16-7995-4964-87d8-ab904bc11ca5"). InnerVolumeSpecName "kube-api-access-lrpzs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:17:21 crc kubenswrapper[4838]: I1128 10:17:21.730885 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "d4f1cd16-7995-4964-87d8-ab904bc11ca5" (UID: "d4f1cd16-7995-4964-87d8-ab904bc11ca5"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:21 crc kubenswrapper[4838]: I1128 10:17:21.766436 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d4f1cd16-7995-4964-87d8-ab904bc11ca5" (UID: "d4f1cd16-7995-4964-87d8-ab904bc11ca5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:21 crc kubenswrapper[4838]: I1128 10:17:21.777487 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-config-data" (OuterVolumeSpecName: "config-data") pod "d4f1cd16-7995-4964-87d8-ab904bc11ca5" (UID: "d4f1cd16-7995-4964-87d8-ab904bc11ca5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:21 crc kubenswrapper[4838]: I1128 10:17:21.825205 4838 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:21 crc kubenswrapper[4838]: I1128 10:17:21.825259 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:21 crc kubenswrapper[4838]: I1128 10:17:21.825278 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:21 crc kubenswrapper[4838]: I1128 10:17:21.825294 4838 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d4f1cd16-7995-4964-87d8-ab904bc11ca5-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:21 crc kubenswrapper[4838]: I1128 10:17:21.825315 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrpzs\" (UniqueName: \"kubernetes.io/projected/d4f1cd16-7995-4964-87d8-ab904bc11ca5-kube-api-access-lrpzs\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:21 crc kubenswrapper[4838]: I1128 10:17:21.825334 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4f1cd16-7995-4964-87d8-ab904bc11ca5-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.119685 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5144a764-e665-486c-ba49-02edbd12cf0b","Type":"ContainerStarted","Data":"706aaa0270dd3982a1389a47ad15ac587ef7c0813b68d2778e41aac8473bedce"} Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.121464 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-qdl6f" 
event={"ID":"d4f1cd16-7995-4964-87d8-ab904bc11ca5","Type":"ContainerDied","Data":"5515aa05d29520621f947285f4b535b4f6ff45921497fd8217fec3616af4d600"} Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.121496 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5515aa05d29520621f947285f4b535b4f6ff45921497fd8217fec3616af4d600" Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.121551 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-qdl6f" Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.439373 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 10:17:22 crc kubenswrapper[4838]: E1128 10:17:22.441414 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4f1cd16-7995-4964-87d8-ab904bc11ca5" containerName="cinder-db-sync" Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.441438 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4f1cd16-7995-4964-87d8-ab904bc11ca5" containerName="cinder-db-sync" Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.441609 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4f1cd16-7995-4964-87d8-ab904bc11ca5" containerName="cinder-db-sync" Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.442583 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.450126 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.450353 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-79wbw" Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.450502 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.451734 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.479035 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.520659 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"] Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.522136 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.548006 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"]
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.556296 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4fw94\" (UniqueName: \"kubernetes.io/projected/b699613e-fbde-4039-82bb-c83e188362c1-kube-api-access-4fw94\") pod \"cinder-scheduler-0\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") " pod="openstack/cinder-scheduler-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.556375 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6xkl\" (UniqueName: \"kubernetes.io/projected/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-kube-api-access-m6xkl\") pod \"dnsmasq-dns-6d97fcdd8f-j2pfr\" (UID: \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.556403 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b699613e-fbde-4039-82bb-c83e188362c1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") " pod="openstack/cinder-scheduler-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.556429 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") " pod="openstack/cinder-scheduler-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.556452 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-config-data\") pod \"cinder-scheduler-0\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") " pod="openstack/cinder-scheduler-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.556538 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-scripts\") pod \"cinder-scheduler-0\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") " pod="openstack/cinder-scheduler-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.556614 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-dns-svc\") pod \"dnsmasq-dns-6d97fcdd8f-j2pfr\" (UID: \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.556851 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-ovsdbserver-nb\") pod \"dnsmasq-dns-6d97fcdd8f-j2pfr\" (UID: \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.556874 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-config\") pod \"dnsmasq-dns-6d97fcdd8f-j2pfr\" (UID: \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.556921 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") " pod="openstack/cinder-scheduler-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.556948 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-ovsdbserver-sb\") pod \"dnsmasq-dns-6d97fcdd8f-j2pfr\" (UID: \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.658775 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-dns-svc\") pod \"dnsmasq-dns-6d97fcdd8f-j2pfr\" (UID: \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.659113 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-ovsdbserver-nb\") pod \"dnsmasq-dns-6d97fcdd8f-j2pfr\" (UID: \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.659138 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-config\") pod \"dnsmasq-dns-6d97fcdd8f-j2pfr\" (UID: \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.659164 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") " pod="openstack/cinder-scheduler-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.659184 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-ovsdbserver-sb\") pod \"dnsmasq-dns-6d97fcdd8f-j2pfr\" (UID: \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.659220 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4fw94\" (UniqueName: \"kubernetes.io/projected/b699613e-fbde-4039-82bb-c83e188362c1-kube-api-access-4fw94\") pod \"cinder-scheduler-0\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") " pod="openstack/cinder-scheduler-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.659271 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6xkl\" (UniqueName: \"kubernetes.io/projected/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-kube-api-access-m6xkl\") pod \"dnsmasq-dns-6d97fcdd8f-j2pfr\" (UID: \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.659292 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b699613e-fbde-4039-82bb-c83e188362c1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") " pod="openstack/cinder-scheduler-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.659321 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") " pod="openstack/cinder-scheduler-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.659341 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-config-data\") pod \"cinder-scheduler-0\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") " pod="openstack/cinder-scheduler-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.659365 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-scripts\") pod \"cinder-scheduler-0\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") " pod="openstack/cinder-scheduler-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.660539 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-dns-svc\") pod \"dnsmasq-dns-6d97fcdd8f-j2pfr\" (UID: \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.661915 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-config\") pod \"dnsmasq-dns-6d97fcdd8f-j2pfr\" (UID: \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.661986 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-ovsdbserver-sb\") pod \"dnsmasq-dns-6d97fcdd8f-j2pfr\" (UID: \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.662033 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b699613e-fbde-4039-82bb-c83e188362c1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") " pod="openstack/cinder-scheduler-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.662258 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-ovsdbserver-nb\") pod \"dnsmasq-dns-6d97fcdd8f-j2pfr\" (UID: \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"
Nov 28 crc kubenswrapper[4838]: I1128 10:17:22.664771 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") " pod="openstack/cinder-scheduler-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.679289 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") " pod="openstack/cinder-scheduler-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.680025 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-scripts\") pod \"cinder-scheduler-0\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") " pod="openstack/cinder-scheduler-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.683908 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6xkl\" (UniqueName: \"kubernetes.io/projected/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-kube-api-access-m6xkl\") pod \"dnsmasq-dns-6d97fcdd8f-j2pfr\" (UID: \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\") " pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.686992 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-config-data\") pod \"cinder-scheduler-0\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") " pod="openstack/cinder-scheduler-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.692265 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4fw94\" (UniqueName: \"kubernetes.io/projected/b699613e-fbde-4039-82bb-c83e188362c1-kube-api-access-4fw94\") pod \"cinder-scheduler-0\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") " pod="openstack/cinder-scheduler-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.708703 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.710006 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.719139 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.735600 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.760974 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " pod="openstack/cinder-api-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.761025 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-config-data\") pod \"cinder-api-0\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " pod="openstack/cinder-api-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.761249 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-config-data-custom\") pod \"cinder-api-0\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " pod="openstack/cinder-api-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.761339 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-scripts\") pod \"cinder-api-0\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " pod="openstack/cinder-api-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.761469 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6lcb\" (UniqueName: \"kubernetes.io/projected/f189d761-b514-47b6-8d4f-6111e883e2b4-kube-api-access-c6lcb\") pod \"cinder-api-0\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " pod="openstack/cinder-api-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.761503 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f189d761-b514-47b6-8d4f-6111e883e2b4-logs\") pod \"cinder-api-0\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " pod="openstack/cinder-api-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.761635 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f189d761-b514-47b6-8d4f-6111e883e2b4-etc-machine-id\") pod \"cinder-api-0\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " pod="openstack/cinder-api-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.812401 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.853776 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.864120 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-config-data-custom\") pod \"cinder-api-0\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " pod="openstack/cinder-api-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.864283 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-scripts\") pod \"cinder-api-0\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " pod="openstack/cinder-api-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.864442 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6lcb\" (UniqueName: \"kubernetes.io/projected/f189d761-b514-47b6-8d4f-6111e883e2b4-kube-api-access-c6lcb\") pod \"cinder-api-0\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " pod="openstack/cinder-api-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.864492 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f189d761-b514-47b6-8d4f-6111e883e2b4-logs\") pod \"cinder-api-0\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " pod="openstack/cinder-api-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.864671 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f189d761-b514-47b6-8d4f-6111e883e2b4-etc-machine-id\") pod \"cinder-api-0\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " pod="openstack/cinder-api-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.864783 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f189d761-b514-47b6-8d4f-6111e883e2b4-etc-machine-id\") pod \"cinder-api-0\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " pod="openstack/cinder-api-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.864800 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " pod="openstack/cinder-api-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.864884 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-config-data\") pod \"cinder-api-0\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " pod="openstack/cinder-api-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.864900 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f189d761-b514-47b6-8d4f-6111e883e2b4-logs\") pod \"cinder-api-0\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " pod="openstack/cinder-api-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.872314 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-scripts\") pod \"cinder-api-0\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " pod="openstack/cinder-api-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.873140 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " pod="openstack/cinder-api-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.873561 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-config-data-custom\") pod \"cinder-api-0\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " pod="openstack/cinder-api-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.874576 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-config-data\") pod \"cinder-api-0\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " pod="openstack/cinder-api-0"
Nov 28 10:17:22 crc kubenswrapper[4838]: I1128 10:17:22.882261 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6lcb\" (UniqueName: \"kubernetes.io/projected/f189d761-b514-47b6-8d4f-6111e883e2b4-kube-api-access-c6lcb\") pod \"cinder-api-0\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " pod="openstack/cinder-api-0"
Nov 28 10:17:23 crc kubenswrapper[4838]: I1128 10:17:23.066388 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 28 10:17:23 crc kubenswrapper[4838]: I1128 10:17:23.162188 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5144a764-e665-486c-ba49-02edbd12cf0b","Type":"ContainerStarted","Data":"9bbcf3b4ec85835db0b14e0ae95a11283946d19b60d073adfac1876e37a4ae91"}
Nov 28 10:17:23 crc kubenswrapper[4838]: I1128 10:17:23.312254 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 10:17:23 crc kubenswrapper[4838]: W1128 10:17:23.315453 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb699613e_fbde_4039_82bb_c83e188362c1.slice/crio-c02ac6228dbb6579f68fe5386e4c706be9317629d7437737b175276ac65002e5 WatchSource:0}: Error finding container c02ac6228dbb6579f68fe5386e4c706be9317629d7437737b175276ac65002e5: Status 404 returned error can't find the container with id c02ac6228dbb6579f68fe5386e4c706be9317629d7437737b175276ac65002e5
Nov 28 10:17:23 crc kubenswrapper[4838]: I1128 10:17:23.331969 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6dfcbd5794-mx784"
Nov 28 10:17:23 crc kubenswrapper[4838]: I1128 10:17:23.342984 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6dfcbd5794-mx784"
Nov 28 10:17:23 crc kubenswrapper[4838]: I1128 10:17:23.417846 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-9858c78b6-nx8kz"]
Nov 28 10:17:23 crc kubenswrapper[4838]: I1128 10:17:23.418388 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-9858c78b6-nx8kz" podUID="57292635-e413-4511-830d-e536d3c8e398" containerName="barbican-api-log" containerID="cri-o://6638064eb253ae73f3b35f7ffb0d337ccd3adc48c6ed020f4b2423ecaf580cf6" gracePeriod=30
Nov 28 10:17:23 crc kubenswrapper[4838]: I1128 10:17:23.418883 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-9858c78b6-nx8kz" podUID="57292635-e413-4511-830d-e536d3c8e398" containerName="barbican-api" containerID="cri-o://0f10fd058a7159094f3c16ee8be6385fb9c4e4143be3a680b9797753573f813f" gracePeriod=30
Nov 28 10:17:23 crc kubenswrapper[4838]: W1128 10:17:23.429080 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4a49a5cf_ca4f_4e48_bec8_8d5a728d8025.slice/crio-eeb26e60d3bd0156e8399c98482b36bba0241e0811dbedd827ba4c6743006b6c WatchSource:0}: Error finding container eeb26e60d3bd0156e8399c98482b36bba0241e0811dbedd827ba4c6743006b6c: Status 404 returned error can't find the container with id eeb26e60d3bd0156e8399c98482b36bba0241e0811dbedd827ba4c6743006b6c
Nov 28 10:17:23 crc kubenswrapper[4838]: I1128 10:17:23.432960 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"]
Nov 28 10:17:23 crc kubenswrapper[4838]: I1128 10:17:23.609902 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 10:17:24 crc kubenswrapper[4838]: I1128 10:17:24.171408 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f189d761-b514-47b6-8d4f-6111e883e2b4","Type":"ContainerStarted","Data":"8480e56fc6f25f1fe2548dd39c0a971ee81f97918f5586f3037042a1f1aa6915"}
Nov 28 10:17:24 crc kubenswrapper[4838]: I1128 10:17:24.173760 4838 generic.go:334] "Generic (PLEG): container finished" podID="57292635-e413-4511-830d-e536d3c8e398" containerID="6638064eb253ae73f3b35f7ffb0d337ccd3adc48c6ed020f4b2423ecaf580cf6" exitCode=143
Nov 28 10:17:24 crc kubenswrapper[4838]: I1128 10:17:24.173827 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-9858c78b6-nx8kz" event={"ID":"57292635-e413-4511-830d-e536d3c8e398","Type":"ContainerDied","Data":"6638064eb253ae73f3b35f7ffb0d337ccd3adc48c6ed020f4b2423ecaf580cf6"}
Nov 28 10:17:24 crc kubenswrapper[4838]: I1128 10:17:24.175488 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b699613e-fbde-4039-82bb-c83e188362c1","Type":"ContainerStarted","Data":"c02ac6228dbb6579f68fe5386e4c706be9317629d7437737b175276ac65002e5"}
Nov 28 10:17:24 crc kubenswrapper[4838]: I1128 10:17:24.177832 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5144a764-e665-486c-ba49-02edbd12cf0b","Type":"ContainerStarted","Data":"df27e7312a84ae6694017e4e578d7d2d9a0616fc792ec2ef8ebedd3ea5e1031d"}
Nov 28 10:17:24 crc kubenswrapper[4838]: I1128 10:17:24.179257 4838 generic.go:334] "Generic (PLEG): container finished" podID="4a49a5cf-ca4f-4e48-bec8-8d5a728d8025" containerID="9841777dfd5f372db2627f815f5f99aba9243f8c35abda8e632440b50e23a98c" exitCode=0
Nov 28 10:17:24 crc kubenswrapper[4838]: I1128 10:17:24.180053 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr" event={"ID":"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025","Type":"ContainerDied","Data":"9841777dfd5f372db2627f815f5f99aba9243f8c35abda8e632440b50e23a98c"}
Nov 28 10:17:24 crc kubenswrapper[4838]: I1128 10:17:24.180074 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr" event={"ID":"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025","Type":"ContainerStarted","Data":"eeb26e60d3bd0156e8399c98482b36bba0241e0811dbedd827ba4c6743006b6c"}
Nov 28 crc kubenswrapper[4838]: I1128 10:17:24.500914 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 10:17:25 crc kubenswrapper[4838]: I1128 10:17:25.190700 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr" event={"ID":"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025","Type":"ContainerStarted","Data":"495079ccddde65fce19fddf29ac8785094e6ce65cdbd107c52813d57e5d4c8ff"}
Nov 28 10:17:25 crc kubenswrapper[4838]: I1128 10:17:25.191906 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"
Nov 28 10:17:25 crc kubenswrapper[4838]: I1128 10:17:25.197221 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f189d761-b514-47b6-8d4f-6111e883e2b4","Type":"ContainerStarted","Data":"db7e3e258c1bc1f060a47489f3d80a15c7d89125acea1610151e4d65ccd67c02"}
Nov 28 10:17:25 crc kubenswrapper[4838]: I1128 10:17:25.197448 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f189d761-b514-47b6-8d4f-6111e883e2b4","Type":"ContainerStarted","Data":"b7b4742e4b6dcaf1915f7ea15b57a3f45776c52712002f2e8068241d3e40c491"}
Nov 28 10:17:25 crc kubenswrapper[4838]: I1128 10:17:25.197562 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Nov 28 10:17:25 crc kubenswrapper[4838]: I1128 10:17:25.197577 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="f189d761-b514-47b6-8d4f-6111e883e2b4" containerName="cinder-api-log" containerID="cri-o://b7b4742e4b6dcaf1915f7ea15b57a3f45776c52712002f2e8068241d3e40c491" gracePeriod=30
Nov 28 10:17:25 crc kubenswrapper[4838]: I1128 10:17:25.197620 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="f189d761-b514-47b6-8d4f-6111e883e2b4" containerName="cinder-api" containerID="cri-o://db7e3e258c1bc1f060a47489f3d80a15c7d89125acea1610151e4d65ccd67c02" gracePeriod=30
Nov 28 10:17:25 crc kubenswrapper[4838]: I1128 10:17:25.228529 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr" podStartSLOduration=3.22849261 podStartE2EDuration="3.22849261s" podCreationTimestamp="2025-11-28 10:17:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:17:25.212687634 +0000 UTC m=+1216.911661804" watchObservedRunningTime="2025-11-28 10:17:25.22849261 +0000 UTC m=+1216.927466780"
Nov 28 10:17:25 crc kubenswrapper[4838]: I1128 10:17:25.228981 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b699613e-fbde-4039-82bb-c83e188362c1","Type":"ContainerStarted","Data":"6742b0ca67279a07047d06b4951926a73f266a1b0aeaf53f34ed12988fa97eb4"}
Nov 28 10:17:25 crc kubenswrapper[4838]: I1128 10:17:25.248407 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5144a764-e665-486c-ba49-02edbd12cf0b","Type":"ContainerStarted","Data":"1f73e632cd0e1d819ec9815b4b27ca0c4c298914c6d1179f42332a0ae8066ee7"}
Nov 28 10:17:25 crc kubenswrapper[4838]: I1128 10:17:25.248637 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 28 10:17:25 crc kubenswrapper[4838]: I1128 10:17:25.289632 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.289615628 podStartE2EDuration="3.289615628s" podCreationTimestamp="2025-11-28 10:17:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:17:25.244767209 +0000 UTC m=+1216.943741379" watchObservedRunningTime="2025-11-28 10:17:25.289615628 +0000 UTC m=+1216.988589798"
Nov 28 10:17:26 crc kubenswrapper[4838]: I1128 10:17:26.257457 4838 generic.go:334] "Generic (PLEG): container finished" podID="f189d761-b514-47b6-8d4f-6111e883e2b4" containerID="b7b4742e4b6dcaf1915f7ea15b57a3f45776c52712002f2e8068241d3e40c491" exitCode=143
Nov 28 10:17:26 crc kubenswrapper[4838]: I1128 10:17:26.257637 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f189d761-b514-47b6-8d4f-6111e883e2b4","Type":"ContainerDied","Data":"b7b4742e4b6dcaf1915f7ea15b57a3f45776c52712002f2e8068241d3e40c491"}
Nov 28 10:17:26 crc kubenswrapper[4838]: I1128 10:17:26.260460 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b699613e-fbde-4039-82bb-c83e188362c1","Type":"ContainerStarted","Data":"31d069750c59f35ecd538f8efae5a389fc3e71b37d3de9aa8e6d33d5a0f4a1a3"}
Nov 28 10:17:26 crc kubenswrapper[4838]: I1128 10:17:26.302579 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.345239828 podStartE2EDuration="4.302562788s" podCreationTimestamp="2025-11-28 10:17:22 +0000 UTC" firstStartedPulling="2025-11-28 10:17:23.327127168 +0000 UTC m=+1215.026101338" lastFinishedPulling="2025-11-28 10:17:24.284450128 +0000 UTC m=+1215.983424298" observedRunningTime="2025-11-28 10:17:26.295021244 +0000 UTC m=+1217.993995414" watchObservedRunningTime="2025-11-28 10:17:26.302562788 +0000 UTC m=+1218.001536958"
Nov 28 10:17:26 crc kubenswrapper[4838]: I1128 10:17:26.303223 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.896031951 podStartE2EDuration="6.303218865s" podCreationTimestamp="2025-11-28 10:17:20 +0000 UTC" firstStartedPulling="2025-11-28 10:17:21.033956657 +0000 UTC m=+1212.732930837" lastFinishedPulling="2025-11-28 10:17:24.441143591 +0000 UTC m=+1216.140117751" observedRunningTime="2025-11-28 10:17:25.291457988 +0000 UTC m=+1216.990432158" watchObservedRunningTime="2025-11-28 10:17:26.303218865 +0000 UTC m=+1218.002193035"
Nov 28 10:17:26 crc kubenswrapper[4838]: I1128 10:17:26.585642 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-9858c78b6-nx8kz" podUID="57292635-e413-4511-830d-e536d3c8e398" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.144:9311/healthcheck\": read tcp 10.217.0.2:57962->10.217.0.144:9311: read: connection reset by peer"
Nov 28 10:17:26 crc kubenswrapper[4838]: I1128 10:17:26.586038 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-9858c78b6-nx8kz" podUID="57292635-e413-4511-830d-e536d3c8e398" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.144:9311/healthcheck\": read tcp 10.217.0.2:57974->10.217.0.144:9311: read: connection reset by peer"
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.005622 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-9858c78b6-nx8kz"
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.058398 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/57292635-e413-4511-830d-e536d3c8e398-config-data-custom\") pod \"57292635-e413-4511-830d-e536d3c8e398\" (UID: \"57292635-e413-4511-830d-e536d3c8e398\") "
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.058597 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57292635-e413-4511-830d-e536d3c8e398-combined-ca-bundle\") pod \"57292635-e413-4511-830d-e536d3c8e398\" (UID: \"57292635-e413-4511-830d-e536d3c8e398\") "
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.058631 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z67wp\" (UniqueName: \"kubernetes.io/projected/57292635-e413-4511-830d-e536d3c8e398-kube-api-access-z67wp\") pod \"57292635-e413-4511-830d-e536d3c8e398\" (UID: \"57292635-e413-4511-830d-e536d3c8e398\") "
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.058698 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57292635-e413-4511-830d-e536d3c8e398-config-data\") pod \"57292635-e413-4511-830d-e536d3c8e398\" (UID: \"57292635-e413-4511-830d-e536d3c8e398\") "
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.058733 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57292635-e413-4511-830d-e536d3c8e398-logs\") pod \"57292635-e413-4511-830d-e536d3c8e398\" (UID: \"57292635-e413-4511-830d-e536d3c8e398\") "
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.059616 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57292635-e413-4511-830d-e536d3c8e398-logs" (OuterVolumeSpecName: "logs") pod "57292635-e413-4511-830d-e536d3c8e398" (UID: "57292635-e413-4511-830d-e536d3c8e398"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.069747 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57292635-e413-4511-830d-e536d3c8e398-kube-api-access-z67wp" (OuterVolumeSpecName: "kube-api-access-z67wp") pod "57292635-e413-4511-830d-e536d3c8e398" (UID: "57292635-e413-4511-830d-e536d3c8e398"). InnerVolumeSpecName "kube-api-access-z67wp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.070443 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57292635-e413-4511-830d-e536d3c8e398-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "57292635-e413-4511-830d-e536d3c8e398" (UID: "57292635-e413-4511-830d-e536d3c8e398"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.128067 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57292635-e413-4511-830d-e536d3c8e398-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "57292635-e413-4511-830d-e536d3c8e398" (UID: "57292635-e413-4511-830d-e536d3c8e398"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.138167 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57292635-e413-4511-830d-e536d3c8e398-config-data" (OuterVolumeSpecName: "config-data") pod "57292635-e413-4511-830d-e536d3c8e398" (UID: "57292635-e413-4511-830d-e536d3c8e398"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.160664 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57292635-e413-4511-830d-e536d3c8e398-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.160708 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z67wp\" (UniqueName: \"kubernetes.io/projected/57292635-e413-4511-830d-e536d3c8e398-kube-api-access-z67wp\") on node \"crc\" DevicePath \"\""
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.160741 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57292635-e413-4511-830d-e536d3c8e398-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.160753 4838 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57292635-e413-4511-830d-e536d3c8e398-logs\") on node \"crc\" DevicePath \"\""
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.160767 4838 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/57292635-e413-4511-830d-e536d3c8e398-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.278850 4838 generic.go:334] "Generic (PLEG): container finished" podID="57292635-e413-4511-830d-e536d3c8e398" containerID="0f10fd058a7159094f3c16ee8be6385fb9c4e4143be3a680b9797753573f813f" exitCode=0
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.281042 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-9858c78b6-nx8kz"
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.283616 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-9858c78b6-nx8kz" event={"ID":"57292635-e413-4511-830d-e536d3c8e398","Type":"ContainerDied","Data":"0f10fd058a7159094f3c16ee8be6385fb9c4e4143be3a680b9797753573f813f"}
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.283734 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-9858c78b6-nx8kz" event={"ID":"57292635-e413-4511-830d-e536d3c8e398","Type":"ContainerDied","Data":"839f89ab60e7cdf19c5baa76c6b40085c74c504b1fce2f0100ef41804c6eaf7d"}
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.283762 4838 scope.go:117] "RemoveContainer" containerID="0f10fd058a7159094f3c16ee8be6385fb9c4e4143be3a680b9797753573f813f"
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.324423 4838 scope.go:117] "RemoveContainer" containerID="6638064eb253ae73f3b35f7ffb0d337ccd3adc48c6ed020f4b2423ecaf580cf6"
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.339905 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-9858c78b6-nx8kz"]
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.349846 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-9858c78b6-nx8kz"]
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.353732 4838 scope.go:117] "RemoveContainer" containerID="0f10fd058a7159094f3c16ee8be6385fb9c4e4143be3a680b9797753573f813f"
Nov 28 10:17:27 crc kubenswrapper[4838]: E1128 10:17:27.354197 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f10fd058a7159094f3c16ee8be6385fb9c4e4143be3a680b9797753573f813f\": container with ID starting with 0f10fd058a7159094f3c16ee8be6385fb9c4e4143be3a680b9797753573f813f not found: ID does not exist" containerID="0f10fd058a7159094f3c16ee8be6385fb9c4e4143be3a680b9797753573f813f"
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.354304 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f10fd058a7159094f3c16ee8be6385fb9c4e4143be3a680b9797753573f813f"} err="failed to get container status \"0f10fd058a7159094f3c16ee8be6385fb9c4e4143be3a680b9797753573f813f\": rpc error: code = NotFound desc = could not find container \"0f10fd058a7159094f3c16ee8be6385fb9c4e4143be3a680b9797753573f813f\": container with ID starting with 0f10fd058a7159094f3c16ee8be6385fb9c4e4143be3a680b9797753573f813f not found: ID does not exist"
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.354418 4838 scope.go:117] "RemoveContainer" containerID="6638064eb253ae73f3b35f7ffb0d337ccd3adc48c6ed020f4b2423ecaf580cf6"
Nov 28 10:17:27 crc kubenswrapper[4838]: E1128 10:17:27.354808 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6638064eb253ae73f3b35f7ffb0d337ccd3adc48c6ed020f4b2423ecaf580cf6\": container with ID starting with 6638064eb253ae73f3b35f7ffb0d337ccd3adc48c6ed020f4b2423ecaf580cf6 not found: ID does not exist" containerID="6638064eb253ae73f3b35f7ffb0d337ccd3adc48c6ed020f4b2423ecaf580cf6"
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.354897 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6638064eb253ae73f3b35f7ffb0d337ccd3adc48c6ed020f4b2423ecaf580cf6"} err="failed to get container status \"6638064eb253ae73f3b35f7ffb0d337ccd3adc48c6ed020f4b2423ecaf580cf6\": rpc error: code = NotFound desc = could not find container \"6638064eb253ae73f3b35f7ffb0d337ccd3adc48c6ed020f4b2423ecaf580cf6\": container with ID starting with 6638064eb253ae73f3b35f7ffb0d337ccd3adc48c6ed020f4b2423ecaf580cf6 not found: ID does not exist"
Nov 28 10:17:27 crc kubenswrapper[4838]: I1128 10:17:27.813665 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Nov 28 10:17:28 crc kubenswrapper[4838]: I1128 10:17:28.585795 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57292635-e413-4511-830d-e536d3c8e398" path="/var/lib/kubelet/pods/57292635-e413-4511-830d-e536d3c8e398/volumes"
Nov 28 10:17:32 crc kubenswrapper[4838]: I1128 10:17:32.855930 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"
Nov 28 10:17:32 crc kubenswrapper[4838]: I1128 10:17:32.945857 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb684768f-7wcp7"]
Nov 28 10:17:32 crc kubenswrapper[4838]: I1128 10:17:32.946178 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" podUID="3eeec0e8-8302-414a-882e-22921c0e0872" containerName="dnsmasq-dns" containerID="cri-o://0c28e94f4de20669a1e88d69a499de09cf4bd337bc0e20dd1df90eda44fc4112" gracePeriod=10
Nov 28 10:17:33 crc kubenswrapper[4838]: I1128 10:17:33.165312 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Nov 28 10:17:33 crc kubenswrapper[4838]: I1128 10:17:33.223755 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 10:17:33 crc kubenswrapper[4838]: I1128 10:17:33.385608 4838 generic.go:334] "Generic (PLEG): container finished" podID="3eeec0e8-8302-414a-882e-22921c0e0872" containerID="0c28e94f4de20669a1e88d69a499de09cf4bd337bc0e20dd1df90eda44fc4112" exitCode=0
Nov 28 10:17:33 crc kubenswrapper[4838]: I1128 10:17:33.385802 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="b699613e-fbde-4039-82bb-c83e188362c1" containerName="cinder-scheduler" containerID="cri-o://6742b0ca67279a07047d06b4951926a73f266a1b0aeaf53f34ed12988fa97eb4" gracePeriod=30
Nov 28 10:17:33 crc kubenswrapper[4838]: I1128 10:17:33.386050 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" event={"ID":"3eeec0e8-8302-414a-882e-22921c0e0872","Type":"ContainerDied","Data":"0c28e94f4de20669a1e88d69a499de09cf4bd337bc0e20dd1df90eda44fc4112"}
Nov 28 10:17:33 crc kubenswrapper[4838]: I1128 10:17:33.386270 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="b699613e-fbde-4039-82bb-c83e188362c1" containerName="probe" containerID="cri-o://31d069750c59f35ecd538f8efae5a389fc3e71b37d3de9aa8e6d33d5a0f4a1a3" gracePeriod=30
Nov 28 10:17:33 crc kubenswrapper[4838]: I1128 10:17:33.456821 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb684768f-7wcp7"
Nov 28 10:17:33 crc kubenswrapper[4838]: I1128 10:17:33.485921 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-config\") pod \"3eeec0e8-8302-414a-882e-22921c0e0872\" (UID: \"3eeec0e8-8302-414a-882e-22921c0e0872\") "
Nov 28 10:17:33 crc kubenswrapper[4838]: I1128 10:17:33.485965 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-ovsdbserver-nb\") pod \"3eeec0e8-8302-414a-882e-22921c0e0872\" (UID: \"3eeec0e8-8302-414a-882e-22921c0e0872\") "
Nov 28 10:17:33 crc kubenswrapper[4838]: I1128 10:17:33.486020 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8j7vg\" (UniqueName: \"kubernetes.io/projected/3eeec0e8-8302-414a-882e-22921c0e0872-kube-api-access-8j7vg\") pod \"3eeec0e8-8302-414a-882e-22921c0e0872\" (UID: \"3eeec0e8-8302-414a-882e-22921c0e0872\") "
Nov 28 10:17:33 crc kubenswrapper[4838]: I1128 10:17:33.486070 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-dns-svc\") pod \"3eeec0e8-8302-414a-882e-22921c0e0872\" (UID: \"3eeec0e8-8302-414a-882e-22921c0e0872\") "
Nov 28 10:17:33 crc kubenswrapper[4838]: I1128 10:17:33.486103 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-ovsdbserver-sb\") pod \"3eeec0e8-8302-414a-882e-22921c0e0872\" (UID: \"3eeec0e8-8302-414a-882e-22921c0e0872\") "
Nov 28 10:17:33 crc kubenswrapper[4838]: I1128 10:17:33.499005 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3eeec0e8-8302-414a-882e-22921c0e0872-kube-api-access-8j7vg" (OuterVolumeSpecName: "kube-api-access-8j7vg") pod "3eeec0e8-8302-414a-882e-22921c0e0872" (UID: "3eeec0e8-8302-414a-882e-22921c0e0872"). InnerVolumeSpecName "kube-api-access-8j7vg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:17:33 crc kubenswrapper[4838]: I1128 10:17:33.532583 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3eeec0e8-8302-414a-882e-22921c0e0872" (UID: "3eeec0e8-8302-414a-882e-22921c0e0872"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 10:17:33 crc kubenswrapper[4838]: I1128 10:17:33.538258 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3eeec0e8-8302-414a-882e-22921c0e0872" (UID: "3eeec0e8-8302-414a-882e-22921c0e0872"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 10:17:33 crc kubenswrapper[4838]: I1128 10:17:33.540275 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-config" (OuterVolumeSpecName: "config") pod "3eeec0e8-8302-414a-882e-22921c0e0872" (UID: "3eeec0e8-8302-414a-882e-22921c0e0872"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 10:17:33 crc kubenswrapper[4838]: I1128 10:17:33.541680 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3eeec0e8-8302-414a-882e-22921c0e0872" (UID: "3eeec0e8-8302-414a-882e-22921c0e0872"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 10:17:33 crc kubenswrapper[4838]: I1128 10:17:33.589201 4838 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 10:17:33 crc kubenswrapper[4838]: I1128 10:17:33.589243 4838 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 28 10:17:33 crc kubenswrapper[4838]: I1128 10:17:33.589254 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-config\") on node \"crc\" DevicePath \"\""
Nov 28 10:17:33 crc kubenswrapper[4838]: I1128 10:17:33.589263 4838 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3eeec0e8-8302-414a-882e-22921c0e0872-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 28 10:17:33 crc kubenswrapper[4838]: I1128 10:17:33.589273 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8j7vg\" (UniqueName: \"kubernetes.io/projected/3eeec0e8-8302-414a-882e-22921c0e0872-kube-api-access-8j7vg\") on node \"crc\" DevicePath \"\""
Nov 28 10:17:34 crc kubenswrapper[4838]: I1128 10:17:34.402160 4838 generic.go:334] "Generic (PLEG): container finished" podID="b699613e-fbde-4039-82bb-c83e188362c1" containerID="31d069750c59f35ecd538f8efae5a389fc3e71b37d3de9aa8e6d33d5a0f4a1a3" exitCode=0
Nov 28 10:17:34 crc kubenswrapper[4838]: I1128 10:17:34.402305 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b699613e-fbde-4039-82bb-c83e188362c1","Type":"ContainerDied","Data":"31d069750c59f35ecd538f8efae5a389fc3e71b37d3de9aa8e6d33d5a0f4a1a3"}
Nov 28 10:17:34 crc kubenswrapper[4838]: I1128 10:17:34.406081 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb684768f-7wcp7" event={"ID":"3eeec0e8-8302-414a-882e-22921c0e0872","Type":"ContainerDied","Data":"d08b2a9a018ecd7a6e01e3627fa83ab707af0f5b6c29645d61c131e9cfc4fe8a"}
Nov 28 10:17:34 crc kubenswrapper[4838]: I1128 10:17:34.406148 4838 scope.go:117] "RemoveContainer" containerID="0c28e94f4de20669a1e88d69a499de09cf4bd337bc0e20dd1df90eda44fc4112"
Nov 28 10:17:34 crc kubenswrapper[4838]: I1128 10:17:34.406153 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb684768f-7wcp7"
Nov 28 10:17:34 crc kubenswrapper[4838]: I1128 10:17:34.446955 4838 scope.go:117] "RemoveContainer" containerID="3f5fccaa14838fdba68d88ed92ddaf340356abd833791ad2ab25ec2717cfdbc5"
Nov 28 10:17:34 crc kubenswrapper[4838]: I1128 10:17:34.474964 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb684768f-7wcp7"]
Nov 28 10:17:34 crc kubenswrapper[4838]: I1128 10:17:34.486816 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bb684768f-7wcp7"]
Nov 28 10:17:34 crc kubenswrapper[4838]: I1128 10:17:34.578064 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3eeec0e8-8302-414a-882e-22921c0e0872" path="/var/lib/kubelet/pods/3eeec0e8-8302-414a-882e-22921c0e0872/volumes"
Nov 28 10:17:35 crc kubenswrapper[4838]: I1128 10:17:35.095951 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Nov 28 10:17:35 crc kubenswrapper[4838]: I1128 10:17:35.702629 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7ff57884d4-gkgfr"
Nov 28 10:17:35 crc kubenswrapper[4838]: I1128 10:17:35.999526 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-7f5f5f8b64-f2wff"
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.002562 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-7f5f5f8b64-f2wff"
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.384386 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.449671 4838 generic.go:334] "Generic (PLEG): container finished" podID="b699613e-fbde-4039-82bb-c83e188362c1" containerID="6742b0ca67279a07047d06b4951926a73f266a1b0aeaf53f34ed12988fa97eb4" exitCode=0
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.450520 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.450946 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b699613e-fbde-4039-82bb-c83e188362c1","Type":"ContainerDied","Data":"6742b0ca67279a07047d06b4951926a73f266a1b0aeaf53f34ed12988fa97eb4"}
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.450975 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b699613e-fbde-4039-82bb-c83e188362c1","Type":"ContainerDied","Data":"c02ac6228dbb6579f68fe5386e4c706be9317629d7437737b175276ac65002e5"}
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.450992 4838 scope.go:117] "RemoveContainer" containerID="31d069750c59f35ecd538f8efae5a389fc3e71b37d3de9aa8e6d33d5a0f4a1a3"
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.472937 4838 scope.go:117] "RemoveContainer" containerID="6742b0ca67279a07047d06b4951926a73f266a1b0aeaf53f34ed12988fa97eb4"
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.491925 4838 scope.go:117] "RemoveContainer" containerID="31d069750c59f35ecd538f8efae5a389fc3e71b37d3de9aa8e6d33d5a0f4a1a3"
Nov 28 10:17:36 crc kubenswrapper[4838]: E1128 10:17:36.492416 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31d069750c59f35ecd538f8efae5a389fc3e71b37d3de9aa8e6d33d5a0f4a1a3\": container with ID starting with 31d069750c59f35ecd538f8efae5a389fc3e71b37d3de9aa8e6d33d5a0f4a1a3 not found: ID does not exist" containerID="31d069750c59f35ecd538f8efae5a389fc3e71b37d3de9aa8e6d33d5a0f4a1a3"
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.492475 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31d069750c59f35ecd538f8efae5a389fc3e71b37d3de9aa8e6d33d5a0f4a1a3"} err="failed to get container status \"31d069750c59f35ecd538f8efae5a389fc3e71b37d3de9aa8e6d33d5a0f4a1a3\": rpc error: code = NotFound desc = could not find container \"31d069750c59f35ecd538f8efae5a389fc3e71b37d3de9aa8e6d33d5a0f4a1a3\": container with ID starting with 31d069750c59f35ecd538f8efae5a389fc3e71b37d3de9aa8e6d33d5a0f4a1a3 not found: ID does not exist"
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.492500 4838 scope.go:117] "RemoveContainer" containerID="6742b0ca67279a07047d06b4951926a73f266a1b0aeaf53f34ed12988fa97eb4"
Nov 28 10:17:36 crc kubenswrapper[4838]: E1128 10:17:36.493162 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6742b0ca67279a07047d06b4951926a73f266a1b0aeaf53f34ed12988fa97eb4\": container with ID starting with 6742b0ca67279a07047d06b4951926a73f266a1b0aeaf53f34ed12988fa97eb4 not found: ID does not exist" containerID="6742b0ca67279a07047d06b4951926a73f266a1b0aeaf53f34ed12988fa97eb4"
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.493194 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6742b0ca67279a07047d06b4951926a73f266a1b0aeaf53f34ed12988fa97eb4"} err="failed to get container status \"6742b0ca67279a07047d06b4951926a73f266a1b0aeaf53f34ed12988fa97eb4\": rpc error: code = NotFound desc = could not find container \"6742b0ca67279a07047d06b4951926a73f266a1b0aeaf53f34ed12988fa97eb4\": container with ID starting with 6742b0ca67279a07047d06b4951926a73f266a1b0aeaf53f34ed12988fa97eb4 not found: ID does not exist"
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.548913 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4fw94\" (UniqueName: \"kubernetes.io/projected/b699613e-fbde-4039-82bb-c83e188362c1-kube-api-access-4fw94\") pod \"b699613e-fbde-4039-82bb-c83e188362c1\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") "
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.550327 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-combined-ca-bundle\") pod \"b699613e-fbde-4039-82bb-c83e188362c1\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") "
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.550375 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-scripts\") pod \"b699613e-fbde-4039-82bb-c83e188362c1\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") "
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.550413 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-config-data\") pod \"b699613e-fbde-4039-82bb-c83e188362c1\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") "
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.550448 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-config-data-custom\") pod \"b699613e-fbde-4039-82bb-c83e188362c1\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") "
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.550488 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b699613e-fbde-4039-82bb-c83e188362c1-etc-machine-id\") pod \"b699613e-fbde-4039-82bb-c83e188362c1\" (UID: \"b699613e-fbde-4039-82bb-c83e188362c1\") "
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.550738 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b699613e-fbde-4039-82bb-c83e188362c1-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "b699613e-fbde-4039-82bb-c83e188362c1" (UID: "b699613e-fbde-4039-82bb-c83e188362c1"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.551364 4838 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b699613e-fbde-4039-82bb-c83e188362c1-etc-machine-id\") on node \"crc\" DevicePath \"\""
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.557932 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-scripts" (OuterVolumeSpecName: "scripts") pod "b699613e-fbde-4039-82bb-c83e188362c1" (UID: "b699613e-fbde-4039-82bb-c83e188362c1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.569985 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b699613e-fbde-4039-82bb-c83e188362c1-kube-api-access-4fw94" (OuterVolumeSpecName: "kube-api-access-4fw94") pod "b699613e-fbde-4039-82bb-c83e188362c1" (UID: "b699613e-fbde-4039-82bb-c83e188362c1"). InnerVolumeSpecName "kube-api-access-4fw94". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.578363 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b699613e-fbde-4039-82bb-c83e188362c1" (UID: "b699613e-fbde-4039-82bb-c83e188362c1"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.610624 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b699613e-fbde-4039-82bb-c83e188362c1" (UID: "b699613e-fbde-4039-82bb-c83e188362c1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.654763 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4fw94\" (UniqueName: \"kubernetes.io/projected/b699613e-fbde-4039-82bb-c83e188362c1-kube-api-access-4fw94\") on node \"crc\" DevicePath \"\""
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.654844 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.654871 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.654894 4838 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.659399 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-config-data" (OuterVolumeSpecName: "config-data") pod "b699613e-fbde-4039-82bb-c83e188362c1" (UID: "b699613e-fbde-4039-82bb-c83e188362c1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.702094 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-698bf66db7-q4nv6"
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.757875 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b699613e-fbde-4039-82bb-c83e188362c1-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 10:17:36 crc kubenswrapper[4838]: E1128 10:17:36.801738 4838 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb699613e_fbde_4039_82bb_c83e188362c1.slice\": RecentStats: unable to find data in memory cache]"
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.805760 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.830051 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.845668 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 10:17:36 crc kubenswrapper[4838]: E1128 10:17:36.846053 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b699613e-fbde-4039-82bb-c83e188362c1" containerName="probe"
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.846078 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="b699613e-fbde-4039-82bb-c83e188362c1" containerName="probe"
Nov 28 10:17:36 crc kubenswrapper[4838]: E1128 10:17:36.846104 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3eeec0e8-8302-414a-882e-22921c0e0872" containerName="init"
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.846113 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="3eeec0e8-8302-414a-882e-22921c0e0872" containerName="init"
Nov 28 10:17:36 crc kubenswrapper[4838]: E1128 10:17:36.846120 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57292635-e413-4511-830d-e536d3c8e398" containerName="barbican-api"
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.846128 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="57292635-e413-4511-830d-e536d3c8e398" containerName="barbican-api"
Nov 28 10:17:36 crc kubenswrapper[4838]: E1128 10:17:36.846160 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b699613e-fbde-4039-82bb-c83e188362c1" containerName="cinder-scheduler"
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.846168 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="b699613e-fbde-4039-82bb-c83e188362c1" containerName="cinder-scheduler"
Nov 28 10:17:36 crc kubenswrapper[4838]: E1128 10:17:36.846178 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57292635-e413-4511-830d-e536d3c8e398" containerName="barbican-api-log"
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.846186 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="57292635-e413-4511-830d-e536d3c8e398" containerName="barbican-api-log"
Nov 28 10:17:36 crc kubenswrapper[4838]: E1128 10:17:36.846195 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3eeec0e8-8302-414a-882e-22921c0e0872" containerName="dnsmasq-dns"
Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.846204 4838 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="3eeec0e8-8302-414a-882e-22921c0e0872" containerName="dnsmasq-dns" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.846391 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="3eeec0e8-8302-414a-882e-22921c0e0872" containerName="dnsmasq-dns" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.846415 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="b699613e-fbde-4039-82bb-c83e188362c1" containerName="cinder-scheduler" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.846437 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="b699613e-fbde-4039-82bb-c83e188362c1" containerName="probe" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.846477 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="57292635-e413-4511-830d-e536d3c8e398" containerName="barbican-api-log" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.846490 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="57292635-e413-4511-830d-e536d3c8e398" containerName="barbican-api" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.847461 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.849921 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.858203 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.859284 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jgc7\" (UniqueName: \"kubernetes.io/projected/289f7c5f-5d1c-44fa-9231-281ed2d83e7a-kube-api-access-8jgc7\") pod \"cinder-scheduler-0\" (UID: \"289f7c5f-5d1c-44fa-9231-281ed2d83e7a\") " pod="openstack/cinder-scheduler-0" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.865663 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/289f7c5f-5d1c-44fa-9231-281ed2d83e7a-scripts\") pod \"cinder-scheduler-0\" (UID: \"289f7c5f-5d1c-44fa-9231-281ed2d83e7a\") " pod="openstack/cinder-scheduler-0" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.865736 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/289f7c5f-5d1c-44fa-9231-281ed2d83e7a-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"289f7c5f-5d1c-44fa-9231-281ed2d83e7a\") " pod="openstack/cinder-scheduler-0" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.865783 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/289f7c5f-5d1c-44fa-9231-281ed2d83e7a-config-data\") pod \"cinder-scheduler-0\" (UID: \"289f7c5f-5d1c-44fa-9231-281ed2d83e7a\") " pod="openstack/cinder-scheduler-0" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.865874 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/289f7c5f-5d1c-44fa-9231-281ed2d83e7a-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"289f7c5f-5d1c-44fa-9231-281ed2d83e7a\") " pod="openstack/cinder-scheduler-0" Nov 28 10:17:36 crc kubenswrapper[4838]: 
I1128 10:17:36.865909 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/289f7c5f-5d1c-44fa-9231-281ed2d83e7a-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"289f7c5f-5d1c-44fa-9231-281ed2d83e7a\") " pod="openstack/cinder-scheduler-0" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.967567 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/289f7c5f-5d1c-44fa-9231-281ed2d83e7a-config-data\") pod \"cinder-scheduler-0\" (UID: \"289f7c5f-5d1c-44fa-9231-281ed2d83e7a\") " pod="openstack/cinder-scheduler-0" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.967661 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/289f7c5f-5d1c-44fa-9231-281ed2d83e7a-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"289f7c5f-5d1c-44fa-9231-281ed2d83e7a\") " pod="openstack/cinder-scheduler-0" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.967687 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/289f7c5f-5d1c-44fa-9231-281ed2d83e7a-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"289f7c5f-5d1c-44fa-9231-281ed2d83e7a\") " pod="openstack/cinder-scheduler-0" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.967773 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jgc7\" (UniqueName: \"kubernetes.io/projected/289f7c5f-5d1c-44fa-9231-281ed2d83e7a-kube-api-access-8jgc7\") pod \"cinder-scheduler-0\" (UID: \"289f7c5f-5d1c-44fa-9231-281ed2d83e7a\") " pod="openstack/cinder-scheduler-0" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.967819 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/289f7c5f-5d1c-44fa-9231-281ed2d83e7a-scripts\") pod \"cinder-scheduler-0\" (UID: \"289f7c5f-5d1c-44fa-9231-281ed2d83e7a\") " pod="openstack/cinder-scheduler-0" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.967838 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/289f7c5f-5d1c-44fa-9231-281ed2d83e7a-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"289f7c5f-5d1c-44fa-9231-281ed2d83e7a\") " pod="openstack/cinder-scheduler-0" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.968209 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/289f7c5f-5d1c-44fa-9231-281ed2d83e7a-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"289f7c5f-5d1c-44fa-9231-281ed2d83e7a\") " pod="openstack/cinder-scheduler-0" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.975502 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/289f7c5f-5d1c-44fa-9231-281ed2d83e7a-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"289f7c5f-5d1c-44fa-9231-281ed2d83e7a\") " pod="openstack/cinder-scheduler-0" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.975613 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/289f7c5f-5d1c-44fa-9231-281ed2d83e7a-config-data\") pod \"cinder-scheduler-0\" 
(UID: \"289f7c5f-5d1c-44fa-9231-281ed2d83e7a\") " pod="openstack/cinder-scheduler-0" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.975905 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/289f7c5f-5d1c-44fa-9231-281ed2d83e7a-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"289f7c5f-5d1c-44fa-9231-281ed2d83e7a\") " pod="openstack/cinder-scheduler-0" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.975967 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/289f7c5f-5d1c-44fa-9231-281ed2d83e7a-scripts\") pod \"cinder-scheduler-0\" (UID: \"289f7c5f-5d1c-44fa-9231-281ed2d83e7a\") " pod="openstack/cinder-scheduler-0" Nov 28 10:17:36 crc kubenswrapper[4838]: I1128 10:17:36.984009 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jgc7\" (UniqueName: \"kubernetes.io/projected/289f7c5f-5d1c-44fa-9231-281ed2d83e7a-kube-api-access-8jgc7\") pod \"cinder-scheduler-0\" (UID: \"289f7c5f-5d1c-44fa-9231-281ed2d83e7a\") " pod="openstack/cinder-scheduler-0" Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.164431 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.604624 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.606159 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.610661 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.610821 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.611365 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-rsm74" Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.632550 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.680197 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0e43f0a1-9712-425c-8ac8-907e14481e05-openstack-config-secret\") pod \"openstackclient\" (UID: \"0e43f0a1-9712-425c-8ac8-907e14481e05\") " pod="openstack/openstackclient" Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.680679 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwt5k\" (UniqueName: \"kubernetes.io/projected/0e43f0a1-9712-425c-8ac8-907e14481e05-kube-api-access-fwt5k\") pod \"openstackclient\" (UID: \"0e43f0a1-9712-425c-8ac8-907e14481e05\") " pod="openstack/openstackclient" Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.680780 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e43f0a1-9712-425c-8ac8-907e14481e05-combined-ca-bundle\") pod \"openstackclient\" (UID: \"0e43f0a1-9712-425c-8ac8-907e14481e05\") " pod="openstack/openstackclient" Nov 28 10:17:37 crc 
kubenswrapper[4838]: I1128 10:17:37.680859 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0e43f0a1-9712-425c-8ac8-907e14481e05-openstack-config\") pod \"openstackclient\" (UID: \"0e43f0a1-9712-425c-8ac8-907e14481e05\") " pod="openstack/openstackclient" Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.689187 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.737674 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-69bc8cb85-2qbr6" Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.785462 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0e43f0a1-9712-425c-8ac8-907e14481e05-openstack-config-secret\") pod \"openstackclient\" (UID: \"0e43f0a1-9712-425c-8ac8-907e14481e05\") " pod="openstack/openstackclient" Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.785561 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwt5k\" (UniqueName: \"kubernetes.io/projected/0e43f0a1-9712-425c-8ac8-907e14481e05-kube-api-access-fwt5k\") pod \"openstackclient\" (UID: \"0e43f0a1-9712-425c-8ac8-907e14481e05\") " pod="openstack/openstackclient" Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.785732 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e43f0a1-9712-425c-8ac8-907e14481e05-combined-ca-bundle\") pod \"openstackclient\" (UID: \"0e43f0a1-9712-425c-8ac8-907e14481e05\") " pod="openstack/openstackclient" Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.785838 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0e43f0a1-9712-425c-8ac8-907e14481e05-openstack-config\") pod \"openstackclient\" (UID: \"0e43f0a1-9712-425c-8ac8-907e14481e05\") " pod="openstack/openstackclient" Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.792831 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0e43f0a1-9712-425c-8ac8-907e14481e05-openstack-config\") pod \"openstackclient\" (UID: \"0e43f0a1-9712-425c-8ac8-907e14481e05\") " pod="openstack/openstackclient" Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.793448 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0e43f0a1-9712-425c-8ac8-907e14481e05-openstack-config-secret\") pod \"openstackclient\" (UID: \"0e43f0a1-9712-425c-8ac8-907e14481e05\") " pod="openstack/openstackclient" Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.795184 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e43f0a1-9712-425c-8ac8-907e14481e05-combined-ca-bundle\") pod \"openstackclient\" (UID: \"0e43f0a1-9712-425c-8ac8-907e14481e05\") " pod="openstack/openstackclient" Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.816166 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7ff57884d4-gkgfr"] Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.816421 4838 kuberuntime_container.go:808] "Killing container 
with a grace period" pod="openstack/neutron-7ff57884d4-gkgfr" podUID="9f301ff0-c619-4fb2-a1e6-a7254b39e13f" containerName="neutron-api" containerID="cri-o://749dc59908ae7ebd5eaf5f716fe0187be34fc2e861977219fa3340212d7b49c3" gracePeriod=30 Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.816774 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwt5k\" (UniqueName: \"kubernetes.io/projected/0e43f0a1-9712-425c-8ac8-907e14481e05-kube-api-access-fwt5k\") pod \"openstackclient\" (UID: \"0e43f0a1-9712-425c-8ac8-907e14481e05\") " pod="openstack/openstackclient" Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.816772 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7ff57884d4-gkgfr" podUID="9f301ff0-c619-4fb2-a1e6-a7254b39e13f" containerName="neutron-httpd" containerID="cri-o://eca3a5e3d95690fe050322e7d54999af48228e9f74303f5d000b82c3f7809f7f" gracePeriod=30 Nov 28 10:17:37 crc kubenswrapper[4838]: I1128 10:17:37.954141 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.007765 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.032161 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.067517 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.069556 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.094024 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b87ad3f7-9446-43b7-9141-2279794386a0-openstack-config\") pod \"openstackclient\" (UID: \"b87ad3f7-9446-43b7-9141-2279794386a0\") " pod="openstack/openstackclient" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.094071 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b87ad3f7-9446-43b7-9141-2279794386a0-openstack-config-secret\") pod \"openstackclient\" (UID: \"b87ad3f7-9446-43b7-9141-2279794386a0\") " pod="openstack/openstackclient" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.094162 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j87q8\" (UniqueName: \"kubernetes.io/projected/b87ad3f7-9446-43b7-9141-2279794386a0-kube-api-access-j87q8\") pod \"openstackclient\" (UID: \"b87ad3f7-9446-43b7-9141-2279794386a0\") " pod="openstack/openstackclient" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.094206 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b87ad3f7-9446-43b7-9141-2279794386a0-combined-ca-bundle\") pod \"openstackclient\" (UID: \"b87ad3f7-9446-43b7-9141-2279794386a0\") " pod="openstack/openstackclient" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.123147 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.195484 4838 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j87q8\" (UniqueName: \"kubernetes.io/projected/b87ad3f7-9446-43b7-9141-2279794386a0-kube-api-access-j87q8\") pod \"openstackclient\" (UID: \"b87ad3f7-9446-43b7-9141-2279794386a0\") " pod="openstack/openstackclient" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.196093 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b87ad3f7-9446-43b7-9141-2279794386a0-combined-ca-bundle\") pod \"openstackclient\" (UID: \"b87ad3f7-9446-43b7-9141-2279794386a0\") " pod="openstack/openstackclient" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.196173 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b87ad3f7-9446-43b7-9141-2279794386a0-openstack-config\") pod \"openstackclient\" (UID: \"b87ad3f7-9446-43b7-9141-2279794386a0\") " pod="openstack/openstackclient" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.196201 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b87ad3f7-9446-43b7-9141-2279794386a0-openstack-config-secret\") pod \"openstackclient\" (UID: \"b87ad3f7-9446-43b7-9141-2279794386a0\") " pod="openstack/openstackclient" Nov 28 10:17:38 crc kubenswrapper[4838]: E1128 10:17:38.197470 4838 log.go:32] "RunPodSandbox from runtime service failed" err=< Nov 28 10:17:38 crc kubenswrapper[4838]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_0e43f0a1-9712-425c-8ac8-907e14481e05_0(f620099fe2288d41a163fb7977829c7aac038d9f245f538ae448679fad56e9a7): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"f620099fe2288d41a163fb7977829c7aac038d9f245f538ae448679fad56e9a7" Netns:"/var/run/netns/9c1cddd7-720e-4f0c-a24c-597244add9d9" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=f620099fe2288d41a163fb7977829c7aac038d9f245f538ae448679fad56e9a7;K8S_POD_UID=0e43f0a1-9712-425c-8ac8-907e14481e05" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/0e43f0a1-9712-425c-8ac8-907e14481e05]: expected pod UID "0e43f0a1-9712-425c-8ac8-907e14481e05" but got "b87ad3f7-9446-43b7-9141-2279794386a0" from Kube API Nov 28 10:17:38 crc kubenswrapper[4838]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 10:17:38 crc kubenswrapper[4838]: > Nov 28 10:17:38 crc kubenswrapper[4838]: E1128 10:17:38.197674 4838 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Nov 28 10:17:38 crc kubenswrapper[4838]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_0e43f0a1-9712-425c-8ac8-907e14481e05_0(f620099fe2288d41a163fb7977829c7aac038d9f245f538ae448679fad56e9a7): error adding pod openstack_openstackclient to CNI network 
"multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"f620099fe2288d41a163fb7977829c7aac038d9f245f538ae448679fad56e9a7" Netns:"/var/run/netns/9c1cddd7-720e-4f0c-a24c-597244add9d9" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=f620099fe2288d41a163fb7977829c7aac038d9f245f538ae448679fad56e9a7;K8S_POD_UID=0e43f0a1-9712-425c-8ac8-907e14481e05" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/0e43f0a1-9712-425c-8ac8-907e14481e05]: expected pod UID "0e43f0a1-9712-425c-8ac8-907e14481e05" but got "b87ad3f7-9446-43b7-9141-2279794386a0" from Kube API Nov 28 10:17:38 crc kubenswrapper[4838]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 10:17:38 crc kubenswrapper[4838]: > pod="openstack/openstackclient" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.198772 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b87ad3f7-9446-43b7-9141-2279794386a0-openstack-config\") pod \"openstackclient\" (UID: \"b87ad3f7-9446-43b7-9141-2279794386a0\") " pod="openstack/openstackclient" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.200820 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b87ad3f7-9446-43b7-9141-2279794386a0-combined-ca-bundle\") pod \"openstackclient\" (UID: \"b87ad3f7-9446-43b7-9141-2279794386a0\") " pod="openstack/openstackclient" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.201036 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b87ad3f7-9446-43b7-9141-2279794386a0-openstack-config-secret\") pod \"openstackclient\" (UID: \"b87ad3f7-9446-43b7-9141-2279794386a0\") " pod="openstack/openstackclient" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.215065 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j87q8\" (UniqueName: \"kubernetes.io/projected/b87ad3f7-9446-43b7-9141-2279794386a0-kube-api-access-j87q8\") pod \"openstackclient\" (UID: \"b87ad3f7-9446-43b7-9141-2279794386a0\") " pod="openstack/openstackclient" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.224097 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.495163 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"289f7c5f-5d1c-44fa-9231-281ed2d83e7a","Type":"ContainerStarted","Data":"b0d1d72a6f22f1f36cf453749c7b2f111835205ac98c0df588af4ced651e6ad7"} Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.527947 4838 generic.go:334] "Generic (PLEG): container finished" podID="9f301ff0-c619-4fb2-a1e6-a7254b39e13f" containerID="eca3a5e3d95690fe050322e7d54999af48228e9f74303f5d000b82c3f7809f7f" exitCode=0 Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.528019 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.528582 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7ff57884d4-gkgfr" event={"ID":"9f301ff0-c619-4fb2-a1e6-a7254b39e13f","Type":"ContainerDied","Data":"eca3a5e3d95690fe050322e7d54999af48228e9f74303f5d000b82c3f7809f7f"} Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.533062 4838 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="0e43f0a1-9712-425c-8ac8-907e14481e05" podUID="b87ad3f7-9446-43b7-9141-2279794386a0" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.555354 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.574428 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b699613e-fbde-4039-82bb-c83e188362c1" path="/var/lib/kubelet/pods/b699613e-fbde-4039-82bb-c83e188362c1/volumes" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.605425 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0e43f0a1-9712-425c-8ac8-907e14481e05-openstack-config-secret\") pod \"0e43f0a1-9712-425c-8ac8-907e14481e05\" (UID: \"0e43f0a1-9712-425c-8ac8-907e14481e05\") " Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.605564 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fwt5k\" (UniqueName: \"kubernetes.io/projected/0e43f0a1-9712-425c-8ac8-907e14481e05-kube-api-access-fwt5k\") pod \"0e43f0a1-9712-425c-8ac8-907e14481e05\" (UID: \"0e43f0a1-9712-425c-8ac8-907e14481e05\") " Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.605733 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e43f0a1-9712-425c-8ac8-907e14481e05-combined-ca-bundle\") pod \"0e43f0a1-9712-425c-8ac8-907e14481e05\" (UID: \"0e43f0a1-9712-425c-8ac8-907e14481e05\") " Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.605754 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0e43f0a1-9712-425c-8ac8-907e14481e05-openstack-config\") pod \"0e43f0a1-9712-425c-8ac8-907e14481e05\" (UID: \"0e43f0a1-9712-425c-8ac8-907e14481e05\") " Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.606490 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e43f0a1-9712-425c-8ac8-907e14481e05-openstack-config" (OuterVolumeSpecName: "openstack-config") pod 
"0e43f0a1-9712-425c-8ac8-907e14481e05" (UID: "0e43f0a1-9712-425c-8ac8-907e14481e05"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.612414 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e43f0a1-9712-425c-8ac8-907e14481e05-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0e43f0a1-9712-425c-8ac8-907e14481e05" (UID: "0e43f0a1-9712-425c-8ac8-907e14481e05"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.613886 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e43f0a1-9712-425c-8ac8-907e14481e05-kube-api-access-fwt5k" (OuterVolumeSpecName: "kube-api-access-fwt5k") pod "0e43f0a1-9712-425c-8ac8-907e14481e05" (UID: "0e43f0a1-9712-425c-8ac8-907e14481e05"). InnerVolumeSpecName "kube-api-access-fwt5k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.619813 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e43f0a1-9712-425c-8ac8-907e14481e05-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "0e43f0a1-9712-425c-8ac8-907e14481e05" (UID: "0e43f0a1-9712-425c-8ac8-907e14481e05"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.704515 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.707409 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e43f0a1-9712-425c-8ac8-907e14481e05-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.707432 4838 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0e43f0a1-9712-425c-8ac8-907e14481e05-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.707444 4838 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0e43f0a1-9712-425c-8ac8-907e14481e05-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:38 crc kubenswrapper[4838]: I1128 10:17:38.707456 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fwt5k\" (UniqueName: \"kubernetes.io/projected/0e43f0a1-9712-425c-8ac8-907e14481e05-kube-api-access-fwt5k\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:39 crc kubenswrapper[4838]: I1128 10:17:39.545708 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"b87ad3f7-9446-43b7-9141-2279794386a0","Type":"ContainerStarted","Data":"8691bc64cd2ad200816d71975c5c75d6232dc7b3a72a961c19d511c469f1aba5"} Nov 28 10:17:39 crc kubenswrapper[4838]: I1128 10:17:39.549082 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 28 10:17:39 crc kubenswrapper[4838]: I1128 10:17:39.549194 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"289f7c5f-5d1c-44fa-9231-281ed2d83e7a","Type":"ContainerStarted","Data":"fc8ef09ff9195867eb7ae51ec10f9898c250a78c6324c7a31a8cfb75c63203cc"} Nov 28 10:17:39 crc kubenswrapper[4838]: I1128 10:17:39.549243 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"289f7c5f-5d1c-44fa-9231-281ed2d83e7a","Type":"ContainerStarted","Data":"a75db6da2497c9cf22ed85d776b818f7bd652edf9d38f1efb2127d6547b85720"} Nov 28 10:17:39 crc kubenswrapper[4838]: I1128 10:17:39.568594 4838 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="0e43f0a1-9712-425c-8ac8-907e14481e05" podUID="b87ad3f7-9446-43b7-9141-2279794386a0" Nov 28 10:17:39 crc kubenswrapper[4838]: I1128 10:17:39.573834 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.573822732 podStartE2EDuration="3.573822732s" podCreationTimestamp="2025-11-28 10:17:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:17:39.565948379 +0000 UTC m=+1231.264922549" watchObservedRunningTime="2025-11-28 10:17:39.573822732 +0000 UTC m=+1231.272796902" Nov 28 10:17:40 crc kubenswrapper[4838]: I1128 10:17:40.570999 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e43f0a1-9712-425c-8ac8-907e14481e05" path="/var/lib/kubelet/pods/0e43f0a1-9712-425c-8ac8-907e14481e05/volumes" Nov 28 10:17:42 crc kubenswrapper[4838]: I1128 10:17:42.165399 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 28 10:17:43 crc kubenswrapper[4838]: I1128 10:17:43.586766 4838 generic.go:334] "Generic (PLEG): container finished" podID="9f301ff0-c619-4fb2-a1e6-a7254b39e13f" containerID="749dc59908ae7ebd5eaf5f716fe0187be34fc2e861977219fa3340212d7b49c3" exitCode=0 Nov 28 10:17:43 crc kubenswrapper[4838]: I1128 10:17:43.586814 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7ff57884d4-gkgfr" event={"ID":"9f301ff0-c619-4fb2-a1e6-a7254b39e13f","Type":"ContainerDied","Data":"749dc59908ae7ebd5eaf5f716fe0187be34fc2e861977219fa3340212d7b49c3"} Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.233608 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-5rgrd"] Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.234624 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-5rgrd" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.249840 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-5rgrd"] Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.348698 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-r7rf2"] Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.350364 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-r7rf2" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.366234 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-r7rf2"] Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.381015 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-cfc5-account-create-update-cfjq4"] Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.382105 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-cfc5-account-create-update-cfjq4" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.386302 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.407100 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmxxt\" (UniqueName: \"kubernetes.io/projected/0a796e35-158d-43c9-a806-92dda81c78f2-kube-api-access-fmxxt\") pod \"nova-api-db-create-5rgrd\" (UID: \"0a796e35-158d-43c9-a806-92dda81c78f2\") " pod="openstack/nova-api-db-create-5rgrd" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.407155 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a796e35-158d-43c9-a806-92dda81c78f2-operator-scripts\") pod \"nova-api-db-create-5rgrd\" (UID: \"0a796e35-158d-43c9-a806-92dda81c78f2\") " pod="openstack/nova-api-db-create-5rgrd" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.423875 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-cfc5-account-create-update-cfjq4"] Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.509222 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmxxt\" (UniqueName: \"kubernetes.io/projected/0a796e35-158d-43c9-a806-92dda81c78f2-kube-api-access-fmxxt\") pod \"nova-api-db-create-5rgrd\" (UID: \"0a796e35-158d-43c9-a806-92dda81c78f2\") " pod="openstack/nova-api-db-create-5rgrd" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.509286 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a796e35-158d-43c9-a806-92dda81c78f2-operator-scripts\") pod \"nova-api-db-create-5rgrd\" (UID: \"0a796e35-158d-43c9-a806-92dda81c78f2\") " pod="openstack/nova-api-db-create-5rgrd" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.509326 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z562h\" (UniqueName: \"kubernetes.io/projected/2c180cf9-7d38-4e43-9723-1fa20242ff56-kube-api-access-z562h\") pod \"nova-cell0-db-create-r7rf2\" (UID: \"2c180cf9-7d38-4e43-9723-1fa20242ff56\") " pod="openstack/nova-cell0-db-create-r7rf2" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.509368 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c180cf9-7d38-4e43-9723-1fa20242ff56-operator-scripts\") pod \"nova-cell0-db-create-r7rf2\" (UID: \"2c180cf9-7d38-4e43-9723-1fa20242ff56\") " pod="openstack/nova-cell0-db-create-r7rf2" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.509386 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cpftl\" 
(UniqueName: \"kubernetes.io/projected/6f84685f-a9f2-45b4-af58-fb0218060369-kube-api-access-cpftl\") pod \"nova-api-cfc5-account-create-update-cfjq4\" (UID: \"6f84685f-a9f2-45b4-af58-fb0218060369\") " pod="openstack/nova-api-cfc5-account-create-update-cfjq4" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.509405 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f84685f-a9f2-45b4-af58-fb0218060369-operator-scripts\") pod \"nova-api-cfc5-account-create-update-cfjq4\" (UID: \"6f84685f-a9f2-45b4-af58-fb0218060369\") " pod="openstack/nova-api-cfc5-account-create-update-cfjq4" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.512871 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a796e35-158d-43c9-a806-92dda81c78f2-operator-scripts\") pod \"nova-api-db-create-5rgrd\" (UID: \"0a796e35-158d-43c9-a806-92dda81c78f2\") " pod="openstack/nova-api-db-create-5rgrd" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.542689 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-h4wpj"] Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.544771 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-h4wpj" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.546376 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmxxt\" (UniqueName: \"kubernetes.io/projected/0a796e35-158d-43c9-a806-92dda81c78f2-kube-api-access-fmxxt\") pod \"nova-api-db-create-5rgrd\" (UID: \"0a796e35-158d-43c9-a806-92dda81c78f2\") " pod="openstack/nova-api-db-create-5rgrd" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.557255 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-5rgrd" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.560290 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-h4wpj"] Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.574505 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-1abe-account-create-update-74trh"] Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.575521 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-1abe-account-create-update-74trh" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.579145 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.581642 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-1abe-account-create-update-74trh"] Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.615310 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z562h\" (UniqueName: \"kubernetes.io/projected/2c180cf9-7d38-4e43-9723-1fa20242ff56-kube-api-access-z562h\") pod \"nova-cell0-db-create-r7rf2\" (UID: \"2c180cf9-7d38-4e43-9723-1fa20242ff56\") " pod="openstack/nova-cell0-db-create-r7rf2" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.615430 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c180cf9-7d38-4e43-9723-1fa20242ff56-operator-scripts\") pod \"nova-cell0-db-create-r7rf2\" (UID: \"2c180cf9-7d38-4e43-9723-1fa20242ff56\") " pod="openstack/nova-cell0-db-create-r7rf2" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.615451 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cpftl\" (UniqueName: \"kubernetes.io/projected/6f84685f-a9f2-45b4-af58-fb0218060369-kube-api-access-cpftl\") pod \"nova-api-cfc5-account-create-update-cfjq4\" (UID: \"6f84685f-a9f2-45b4-af58-fb0218060369\") " pod="openstack/nova-api-cfc5-account-create-update-cfjq4" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.615479 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f84685f-a9f2-45b4-af58-fb0218060369-operator-scripts\") pod \"nova-api-cfc5-account-create-update-cfjq4\" (UID: \"6f84685f-a9f2-45b4-af58-fb0218060369\") " pod="openstack/nova-api-cfc5-account-create-update-cfjq4" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.616282 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c180cf9-7d38-4e43-9723-1fa20242ff56-operator-scripts\") pod \"nova-cell0-db-create-r7rf2\" (UID: \"2c180cf9-7d38-4e43-9723-1fa20242ff56\") " pod="openstack/nova-cell0-db-create-r7rf2" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.616403 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f84685f-a9f2-45b4-af58-fb0218060369-operator-scripts\") pod \"nova-api-cfc5-account-create-update-cfjq4\" (UID: \"6f84685f-a9f2-45b4-af58-fb0218060369\") " pod="openstack/nova-api-cfc5-account-create-update-cfjq4" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.640500 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z562h\" (UniqueName: \"kubernetes.io/projected/2c180cf9-7d38-4e43-9723-1fa20242ff56-kube-api-access-z562h\") pod \"nova-cell0-db-create-r7rf2\" (UID: \"2c180cf9-7d38-4e43-9723-1fa20242ff56\") " pod="openstack/nova-cell0-db-create-r7rf2" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.660339 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cpftl\" (UniqueName: \"kubernetes.io/projected/6f84685f-a9f2-45b4-af58-fb0218060369-kube-api-access-cpftl\") pod 
\"nova-api-cfc5-account-create-update-cfjq4\" (UID: \"6f84685f-a9f2-45b4-af58-fb0218060369\") " pod="openstack/nova-api-cfc5-account-create-update-cfjq4" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.671140 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-r7rf2" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.710574 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-cfc5-account-create-update-cfjq4" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.718437 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d048538-8370-4422-89b8-1f4733ae72b1-operator-scripts\") pod \"nova-cell0-1abe-account-create-update-74trh\" (UID: \"2d048538-8370-4422-89b8-1f4733ae72b1\") " pod="openstack/nova-cell0-1abe-account-create-update-74trh" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.718492 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqw5x\" (UniqueName: \"kubernetes.io/projected/2d048538-8370-4422-89b8-1f4733ae72b1-kube-api-access-bqw5x\") pod \"nova-cell0-1abe-account-create-update-74trh\" (UID: \"2d048538-8370-4422-89b8-1f4733ae72b1\") " pod="openstack/nova-cell0-1abe-account-create-update-74trh" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.718537 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szgmx\" (UniqueName: \"kubernetes.io/projected/ed9862ca-36d5-4ece-9aa7-4ed71b713e15-kube-api-access-szgmx\") pod \"nova-cell1-db-create-h4wpj\" (UID: \"ed9862ca-36d5-4ece-9aa7-4ed71b713e15\") " pod="openstack/nova-cell1-db-create-h4wpj" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.718588 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed9862ca-36d5-4ece-9aa7-4ed71b713e15-operator-scripts\") pod \"nova-cell1-db-create-h4wpj\" (UID: \"ed9862ca-36d5-4ece-9aa7-4ed71b713e15\") " pod="openstack/nova-cell1-db-create-h4wpj" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.758810 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-d90b-account-create-update-lklt7"] Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.760343 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-d90b-account-create-update-lklt7" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.766385 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.800769 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-d90b-account-create-update-lklt7"] Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.821562 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed9862ca-36d5-4ece-9aa7-4ed71b713e15-operator-scripts\") pod \"nova-cell1-db-create-h4wpj\" (UID: \"ed9862ca-36d5-4ece-9aa7-4ed71b713e15\") " pod="openstack/nova-cell1-db-create-h4wpj" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.821687 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d048538-8370-4422-89b8-1f4733ae72b1-operator-scripts\") pod \"nova-cell0-1abe-account-create-update-74trh\" (UID: \"2d048538-8370-4422-89b8-1f4733ae72b1\") " pod="openstack/nova-cell0-1abe-account-create-update-74trh" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.821729 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqw5x\" (UniqueName: \"kubernetes.io/projected/2d048538-8370-4422-89b8-1f4733ae72b1-kube-api-access-bqw5x\") pod \"nova-cell0-1abe-account-create-update-74trh\" (UID: \"2d048538-8370-4422-89b8-1f4733ae72b1\") " pod="openstack/nova-cell0-1abe-account-create-update-74trh" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.821769 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szgmx\" (UniqueName: \"kubernetes.io/projected/ed9862ca-36d5-4ece-9aa7-4ed71b713e15-kube-api-access-szgmx\") pod \"nova-cell1-db-create-h4wpj\" (UID: \"ed9862ca-36d5-4ece-9aa7-4ed71b713e15\") " pod="openstack/nova-cell1-db-create-h4wpj" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.822571 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d048538-8370-4422-89b8-1f4733ae72b1-operator-scripts\") pod \"nova-cell0-1abe-account-create-update-74trh\" (UID: \"2d048538-8370-4422-89b8-1f4733ae72b1\") " pod="openstack/nova-cell0-1abe-account-create-update-74trh" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.823107 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed9862ca-36d5-4ece-9aa7-4ed71b713e15-operator-scripts\") pod \"nova-cell1-db-create-h4wpj\" (UID: \"ed9862ca-36d5-4ece-9aa7-4ed71b713e15\") " pod="openstack/nova-cell1-db-create-h4wpj" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.841263 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqw5x\" (UniqueName: \"kubernetes.io/projected/2d048538-8370-4422-89b8-1f4733ae72b1-kube-api-access-bqw5x\") pod \"nova-cell0-1abe-account-create-update-74trh\" (UID: \"2d048538-8370-4422-89b8-1f4733ae72b1\") " pod="openstack/nova-cell0-1abe-account-create-update-74trh" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.855455 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szgmx\" (UniqueName: 
\"kubernetes.io/projected/ed9862ca-36d5-4ece-9aa7-4ed71b713e15-kube-api-access-szgmx\") pod \"nova-cell1-db-create-h4wpj\" (UID: \"ed9862ca-36d5-4ece-9aa7-4ed71b713e15\") " pod="openstack/nova-cell1-db-create-h4wpj" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.923301 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qs76r\" (UniqueName: \"kubernetes.io/projected/3af8b444-f619-4a2c-bfcc-1dbb7966eb62-kube-api-access-qs76r\") pod \"nova-cell1-d90b-account-create-update-lklt7\" (UID: \"3af8b444-f619-4a2c-bfcc-1dbb7966eb62\") " pod="openstack/nova-cell1-d90b-account-create-update-lklt7" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.923383 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3af8b444-f619-4a2c-bfcc-1dbb7966eb62-operator-scripts\") pod \"nova-cell1-d90b-account-create-update-lklt7\" (UID: \"3af8b444-f619-4a2c-bfcc-1dbb7966eb62\") " pod="openstack/nova-cell1-d90b-account-create-update-lklt7" Nov 28 10:17:44 crc kubenswrapper[4838]: I1128 10:17:44.935414 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-h4wpj" Nov 28 10:17:45 crc kubenswrapper[4838]: I1128 10:17:45.008470 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-1abe-account-create-update-74trh" Nov 28 10:17:45 crc kubenswrapper[4838]: I1128 10:17:45.024761 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qs76r\" (UniqueName: \"kubernetes.io/projected/3af8b444-f619-4a2c-bfcc-1dbb7966eb62-kube-api-access-qs76r\") pod \"nova-cell1-d90b-account-create-update-lklt7\" (UID: \"3af8b444-f619-4a2c-bfcc-1dbb7966eb62\") " pod="openstack/nova-cell1-d90b-account-create-update-lklt7" Nov 28 10:17:45 crc kubenswrapper[4838]: I1128 10:17:45.024816 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3af8b444-f619-4a2c-bfcc-1dbb7966eb62-operator-scripts\") pod \"nova-cell1-d90b-account-create-update-lklt7\" (UID: \"3af8b444-f619-4a2c-bfcc-1dbb7966eb62\") " pod="openstack/nova-cell1-d90b-account-create-update-lklt7" Nov 28 10:17:45 crc kubenswrapper[4838]: I1128 10:17:45.025515 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3af8b444-f619-4a2c-bfcc-1dbb7966eb62-operator-scripts\") pod \"nova-cell1-d90b-account-create-update-lklt7\" (UID: \"3af8b444-f619-4a2c-bfcc-1dbb7966eb62\") " pod="openstack/nova-cell1-d90b-account-create-update-lklt7" Nov 28 10:17:45 crc kubenswrapper[4838]: I1128 10:17:45.043079 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qs76r\" (UniqueName: \"kubernetes.io/projected/3af8b444-f619-4a2c-bfcc-1dbb7966eb62-kube-api-access-qs76r\") pod \"nova-cell1-d90b-account-create-update-lklt7\" (UID: \"3af8b444-f619-4a2c-bfcc-1dbb7966eb62\") " pod="openstack/nova-cell1-d90b-account-create-update-lklt7" Nov 28 10:17:45 crc kubenswrapper[4838]: I1128 10:17:45.088167 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-d90b-account-create-update-lklt7" Nov 28 10:17:45 crc kubenswrapper[4838]: I1128 10:17:45.264628 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:17:45 crc kubenswrapper[4838]: I1128 10:17:45.264918 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5144a764-e665-486c-ba49-02edbd12cf0b" containerName="ceilometer-central-agent" containerID="cri-o://706aaa0270dd3982a1389a47ad15ac587ef7c0813b68d2778e41aac8473bedce" gracePeriod=30 Nov 28 10:17:45 crc kubenswrapper[4838]: I1128 10:17:45.264982 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5144a764-e665-486c-ba49-02edbd12cf0b" containerName="proxy-httpd" containerID="cri-o://1f73e632cd0e1d819ec9815b4b27ca0c4c298914c6d1179f42332a0ae8066ee7" gracePeriod=30 Nov 28 10:17:45 crc kubenswrapper[4838]: I1128 10:17:45.265043 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5144a764-e665-486c-ba49-02edbd12cf0b" containerName="sg-core" containerID="cri-o://df27e7312a84ae6694017e4e578d7d2d9a0616fc792ec2ef8ebedd3ea5e1031d" gracePeriod=30 Nov 28 10:17:45 crc kubenswrapper[4838]: I1128 10:17:45.265120 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5144a764-e665-486c-ba49-02edbd12cf0b" containerName="ceilometer-notification-agent" containerID="cri-o://9bbcf3b4ec85835db0b14e0ae95a11283946d19b60d073adfac1876e37a4ae91" gracePeriod=30 Nov 28 10:17:45 crc kubenswrapper[4838]: I1128 10:17:45.274699 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="5144a764-e665-486c-ba49-02edbd12cf0b" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.149:3000/\": EOF" Nov 28 10:17:45 crc kubenswrapper[4838]: I1128 10:17:45.628535 4838 generic.go:334] "Generic (PLEG): container finished" podID="5144a764-e665-486c-ba49-02edbd12cf0b" containerID="1f73e632cd0e1d819ec9815b4b27ca0c4c298914c6d1179f42332a0ae8066ee7" exitCode=0 Nov 28 10:17:45 crc kubenswrapper[4838]: I1128 10:17:45.628877 4838 generic.go:334] "Generic (PLEG): container finished" podID="5144a764-e665-486c-ba49-02edbd12cf0b" containerID="df27e7312a84ae6694017e4e578d7d2d9a0616fc792ec2ef8ebedd3ea5e1031d" exitCode=2 Nov 28 10:17:45 crc kubenswrapper[4838]: I1128 10:17:45.628603 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5144a764-e665-486c-ba49-02edbd12cf0b","Type":"ContainerDied","Data":"1f73e632cd0e1d819ec9815b4b27ca0c4c298914c6d1179f42332a0ae8066ee7"} Nov 28 10:17:45 crc kubenswrapper[4838]: I1128 10:17:45.628917 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5144a764-e665-486c-ba49-02edbd12cf0b","Type":"ContainerDied","Data":"df27e7312a84ae6694017e4e578d7d2d9a0616fc792ec2ef8ebedd3ea5e1031d"} Nov 28 10:17:46 crc kubenswrapper[4838]: I1128 10:17:46.643893 4838 generic.go:334] "Generic (PLEG): container finished" podID="5144a764-e665-486c-ba49-02edbd12cf0b" containerID="706aaa0270dd3982a1389a47ad15ac587ef7c0813b68d2778e41aac8473bedce" exitCode=0 Nov 28 10:17:46 crc kubenswrapper[4838]: I1128 10:17:46.643934 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"5144a764-e665-486c-ba49-02edbd12cf0b","Type":"ContainerDied","Data":"706aaa0270dd3982a1389a47ad15ac587ef7c0813b68d2778e41aac8473bedce"} Nov 28 10:17:47 crc kubenswrapper[4838]: I1128 10:17:47.374113 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.449697 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.464982 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7ff57884d4-gkgfr" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.582312 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-5rgrd"] Nov 28 10:17:49 crc kubenswrapper[4838]: W1128 10:17:49.594889 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0a796e35_158d_43c9_a806_92dda81c78f2.slice/crio-50d8d1877e728f8d06ae4204dfa01f7f3fafda04671348669785475bb9139f9d WatchSource:0}: Error finding container 50d8d1877e728f8d06ae4204dfa01f7f3fafda04671348669785475bb9139f9d: Status 404 returned error can't find the container with id 50d8d1877e728f8d06ae4204dfa01f7f3fafda04671348669785475bb9139f9d Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.602106 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-h4wpj"] Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.604542 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-scripts\") pod \"5144a764-e665-486c-ba49-02edbd12cf0b\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.604690 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5144a764-e665-486c-ba49-02edbd12cf0b-log-httpd\") pod \"5144a764-e665-486c-ba49-02edbd12cf0b\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.604783 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-sg-core-conf-yaml\") pod \"5144a764-e665-486c-ba49-02edbd12cf0b\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.604838 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-config\") pod \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\" (UID: \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\") " Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.604878 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-combined-ca-bundle\") pod \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\" (UID: \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\") " Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.604916 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5144a764-e665-486c-ba49-02edbd12cf0b-run-httpd\") pod 
\"5144a764-e665-486c-ba49-02edbd12cf0b\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.604953 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-ovndb-tls-certs\") pod \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\" (UID: \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\") " Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.604985 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xvmkz\" (UniqueName: \"kubernetes.io/projected/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-kube-api-access-xvmkz\") pod \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\" (UID: \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\") " Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.605148 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-httpd-config\") pod \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\" (UID: \"9f301ff0-c619-4fb2-a1e6-a7254b39e13f\") " Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.605463 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-combined-ca-bundle\") pod \"5144a764-e665-486c-ba49-02edbd12cf0b\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.605541 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zd9n9\" (UniqueName: \"kubernetes.io/projected/5144a764-e665-486c-ba49-02edbd12cf0b-kube-api-access-zd9n9\") pod \"5144a764-e665-486c-ba49-02edbd12cf0b\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.605590 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-config-data\") pod \"5144a764-e665-486c-ba49-02edbd12cf0b\" (UID: \"5144a764-e665-486c-ba49-02edbd12cf0b\") " Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.606973 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5144a764-e665-486c-ba49-02edbd12cf0b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5144a764-e665-486c-ba49-02edbd12cf0b" (UID: "5144a764-e665-486c-ba49-02edbd12cf0b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.607009 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5144a764-e665-486c-ba49-02edbd12cf0b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5144a764-e665-486c-ba49-02edbd12cf0b" (UID: "5144a764-e665-486c-ba49-02edbd12cf0b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.612686 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "9f301ff0-c619-4fb2-a1e6-a7254b39e13f" (UID: "9f301ff0-c619-4fb2-a1e6-a7254b39e13f"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.613894 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5144a764-e665-486c-ba49-02edbd12cf0b-kube-api-access-zd9n9" (OuterVolumeSpecName: "kube-api-access-zd9n9") pod "5144a764-e665-486c-ba49-02edbd12cf0b" (UID: "5144a764-e665-486c-ba49-02edbd12cf0b"). InnerVolumeSpecName "kube-api-access-zd9n9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.614544 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-scripts" (OuterVolumeSpecName: "scripts") pod "5144a764-e665-486c-ba49-02edbd12cf0b" (UID: "5144a764-e665-486c-ba49-02edbd12cf0b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.622482 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-kube-api-access-xvmkz" (OuterVolumeSpecName: "kube-api-access-xvmkz") pod "9f301ff0-c619-4fb2-a1e6-a7254b39e13f" (UID: "9f301ff0-c619-4fb2-a1e6-a7254b39e13f"). InnerVolumeSpecName "kube-api-access-xvmkz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.648048 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "5144a764-e665-486c-ba49-02edbd12cf0b" (UID: "5144a764-e665-486c-ba49-02edbd12cf0b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.678236 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-h4wpj" event={"ID":"ed9862ca-36d5-4ece-9aa7-4ed71b713e15","Type":"ContainerStarted","Data":"24f56dd2617aeab0001876d5b1e7292fcfe34b1171a023507b05323d844449f4"} Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.688726 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7ff57884d4-gkgfr" event={"ID":"9f301ff0-c619-4fb2-a1e6-a7254b39e13f","Type":"ContainerDied","Data":"4cd0dd8b7ec72a9fcf0acced6524b901d54c912bd568230b16cc6540a24fc225"} Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.688775 4838 scope.go:117] "RemoveContainer" containerID="eca3a5e3d95690fe050322e7d54999af48228e9f74303f5d000b82c3f7809f7f" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.688930 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7ff57884d4-gkgfr" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.696265 4838 generic.go:334] "Generic (PLEG): container finished" podID="5144a764-e665-486c-ba49-02edbd12cf0b" containerID="9bbcf3b4ec85835db0b14e0ae95a11283946d19b60d073adfac1876e37a4ae91" exitCode=0 Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.696325 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5144a764-e665-486c-ba49-02edbd12cf0b","Type":"ContainerDied","Data":"9bbcf3b4ec85835db0b14e0ae95a11283946d19b60d073adfac1876e37a4ae91"} Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.696353 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5144a764-e665-486c-ba49-02edbd12cf0b","Type":"ContainerDied","Data":"779d7fe9319aa15bf86f4147d8ccf2ff684cf743e8a1cabce5046f50ebe45a6b"} Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.696416 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.701130 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"b87ad3f7-9446-43b7-9141-2279794386a0","Type":"ContainerStarted","Data":"cfc3be69f1343913801a00887c9ac160711b8fd92b34d32aef768ae899a82f1e"} Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.704068 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-5rgrd" event={"ID":"0a796e35-158d-43c9-a806-92dda81c78f2","Type":"ContainerStarted","Data":"50d8d1877e728f8d06ae4204dfa01f7f3fafda04671348669785475bb9139f9d"} Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.704575 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9f301ff0-c619-4fb2-a1e6-a7254b39e13f" (UID: "9f301ff0-c619-4fb2-a1e6-a7254b39e13f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.711013 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zd9n9\" (UniqueName: \"kubernetes.io/projected/5144a764-e665-486c-ba49-02edbd12cf0b-kube-api-access-zd9n9\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.711045 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.711058 4838 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5144a764-e665-486c-ba49-02edbd12cf0b-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.711072 4838 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.711084 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.711095 4838 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5144a764-e665-486c-ba49-02edbd12cf0b-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.711107 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xvmkz\" (UniqueName: \"kubernetes.io/projected/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-kube-api-access-xvmkz\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.711120 4838 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.721299 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=1.474907446 podStartE2EDuration="11.721259828s" podCreationTimestamp="2025-11-28 10:17:38 +0000 UTC" firstStartedPulling="2025-11-28 10:17:38.708527052 +0000 UTC m=+1230.407501222" lastFinishedPulling="2025-11-28 10:17:48.954879434 +0000 UTC m=+1240.653853604" observedRunningTime="2025-11-28 10:17:49.714515217 +0000 UTC m=+1241.413489387" watchObservedRunningTime="2025-11-28 10:17:49.721259828 +0000 UTC m=+1241.420233998" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.723305 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-config" (OuterVolumeSpecName: "config") pod "9f301ff0-c619-4fb2-a1e6-a7254b39e13f" (UID: "9f301ff0-c619-4fb2-a1e6-a7254b39e13f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.734059 4838 scope.go:117] "RemoveContainer" containerID="749dc59908ae7ebd5eaf5f716fe0187be34fc2e861977219fa3340212d7b49c3" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.753569 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5144a764-e665-486c-ba49-02edbd12cf0b" (UID: "5144a764-e665-486c-ba49-02edbd12cf0b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.764925 4838 scope.go:117] "RemoveContainer" containerID="1f73e632cd0e1d819ec9815b4b27ca0c4c298914c6d1179f42332a0ae8066ee7" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.767892 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "9f301ff0-c619-4fb2-a1e6-a7254b39e13f" (UID: "9f301ff0-c619-4fb2-a1e6-a7254b39e13f"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.769478 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-config-data" (OuterVolumeSpecName: "config-data") pod "5144a764-e665-486c-ba49-02edbd12cf0b" (UID: "5144a764-e665-486c-ba49-02edbd12cf0b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.810514 4838 scope.go:117] "RemoveContainer" containerID="df27e7312a84ae6694017e4e578d7d2d9a0616fc792ec2ef8ebedd3ea5e1031d" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.813648 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.813674 4838 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f301ff0-c619-4fb2-a1e6-a7254b39e13f-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.813686 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.813695 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5144a764-e665-486c-ba49-02edbd12cf0b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.903556 4838 scope.go:117] "RemoveContainer" containerID="9bbcf3b4ec85835db0b14e0ae95a11283946d19b60d073adfac1876e37a4ae91" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.931335 4838 scope.go:117] "RemoveContainer" containerID="706aaa0270dd3982a1389a47ad15ac587ef7c0813b68d2778e41aac8473bedce" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.950199 4838 scope.go:117] "RemoveContainer" containerID="1f73e632cd0e1d819ec9815b4b27ca0c4c298914c6d1179f42332a0ae8066ee7" Nov 28 10:17:49 crc kubenswrapper[4838]: E1128 10:17:49.950764 
4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f73e632cd0e1d819ec9815b4b27ca0c4c298914c6d1179f42332a0ae8066ee7\": container with ID starting with 1f73e632cd0e1d819ec9815b4b27ca0c4c298914c6d1179f42332a0ae8066ee7 not found: ID does not exist" containerID="1f73e632cd0e1d819ec9815b4b27ca0c4c298914c6d1179f42332a0ae8066ee7" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.950796 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f73e632cd0e1d819ec9815b4b27ca0c4c298914c6d1179f42332a0ae8066ee7"} err="failed to get container status \"1f73e632cd0e1d819ec9815b4b27ca0c4c298914c6d1179f42332a0ae8066ee7\": rpc error: code = NotFound desc = could not find container \"1f73e632cd0e1d819ec9815b4b27ca0c4c298914c6d1179f42332a0ae8066ee7\": container with ID starting with 1f73e632cd0e1d819ec9815b4b27ca0c4c298914c6d1179f42332a0ae8066ee7 not found: ID does not exist" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.950823 4838 scope.go:117] "RemoveContainer" containerID="df27e7312a84ae6694017e4e578d7d2d9a0616fc792ec2ef8ebedd3ea5e1031d" Nov 28 10:17:49 crc kubenswrapper[4838]: E1128 10:17:49.951096 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df27e7312a84ae6694017e4e578d7d2d9a0616fc792ec2ef8ebedd3ea5e1031d\": container with ID starting with df27e7312a84ae6694017e4e578d7d2d9a0616fc792ec2ef8ebedd3ea5e1031d not found: ID does not exist" containerID="df27e7312a84ae6694017e4e578d7d2d9a0616fc792ec2ef8ebedd3ea5e1031d" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.951117 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df27e7312a84ae6694017e4e578d7d2d9a0616fc792ec2ef8ebedd3ea5e1031d"} err="failed to get container status \"df27e7312a84ae6694017e4e578d7d2d9a0616fc792ec2ef8ebedd3ea5e1031d\": rpc error: code = NotFound desc = could not find container \"df27e7312a84ae6694017e4e578d7d2d9a0616fc792ec2ef8ebedd3ea5e1031d\": container with ID starting with df27e7312a84ae6694017e4e578d7d2d9a0616fc792ec2ef8ebedd3ea5e1031d not found: ID does not exist" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.951133 4838 scope.go:117] "RemoveContainer" containerID="9bbcf3b4ec85835db0b14e0ae95a11283946d19b60d073adfac1876e37a4ae91" Nov 28 10:17:49 crc kubenswrapper[4838]: E1128 10:17:49.951370 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9bbcf3b4ec85835db0b14e0ae95a11283946d19b60d073adfac1876e37a4ae91\": container with ID starting with 9bbcf3b4ec85835db0b14e0ae95a11283946d19b60d073adfac1876e37a4ae91 not found: ID does not exist" containerID="9bbcf3b4ec85835db0b14e0ae95a11283946d19b60d073adfac1876e37a4ae91" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.951392 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9bbcf3b4ec85835db0b14e0ae95a11283946d19b60d073adfac1876e37a4ae91"} err="failed to get container status \"9bbcf3b4ec85835db0b14e0ae95a11283946d19b60d073adfac1876e37a4ae91\": rpc error: code = NotFound desc = could not find container \"9bbcf3b4ec85835db0b14e0ae95a11283946d19b60d073adfac1876e37a4ae91\": container with ID starting with 9bbcf3b4ec85835db0b14e0ae95a11283946d19b60d073adfac1876e37a4ae91 not found: ID does not exist" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.951410 4838 scope.go:117] 
"RemoveContainer" containerID="706aaa0270dd3982a1389a47ad15ac587ef7c0813b68d2778e41aac8473bedce" Nov 28 10:17:49 crc kubenswrapper[4838]: E1128 10:17:49.951655 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"706aaa0270dd3982a1389a47ad15ac587ef7c0813b68d2778e41aac8473bedce\": container with ID starting with 706aaa0270dd3982a1389a47ad15ac587ef7c0813b68d2778e41aac8473bedce not found: ID does not exist" containerID="706aaa0270dd3982a1389a47ad15ac587ef7c0813b68d2778e41aac8473bedce" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.951677 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"706aaa0270dd3982a1389a47ad15ac587ef7c0813b68d2778e41aac8473bedce"} err="failed to get container status \"706aaa0270dd3982a1389a47ad15ac587ef7c0813b68d2778e41aac8473bedce\": rpc error: code = NotFound desc = could not find container \"706aaa0270dd3982a1389a47ad15ac587ef7c0813b68d2778e41aac8473bedce\": container with ID starting with 706aaa0270dd3982a1389a47ad15ac587ef7c0813b68d2778e41aac8473bedce not found: ID does not exist" Nov 28 10:17:49 crc kubenswrapper[4838]: I1128 10:17:49.986864 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-cfc5-account-create-update-cfjq4"] Nov 28 10:17:49 crc kubenswrapper[4838]: W1128 10:17:49.990540 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f84685f_a9f2_45b4_af58_fb0218060369.slice/crio-399003d531d05ccae7bd17753b9980d95cbee13c15c33703bc2cebe8d5470f8a WatchSource:0}: Error finding container 399003d531d05ccae7bd17753b9980d95cbee13c15c33703bc2cebe8d5470f8a: Status 404 returned error can't find the container with id 399003d531d05ccae7bd17753b9980d95cbee13c15c33703bc2cebe8d5470f8a Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.011985 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-r7rf2"] Nov 28 10:17:50 crc kubenswrapper[4838]: W1128 10:17:50.022530 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c180cf9_7d38_4e43_9723_1fa20242ff56.slice/crio-1636aef1e87bac9c1e43d7d97099656ea82aa780dcf697672255591d1c187bfb WatchSource:0}: Error finding container 1636aef1e87bac9c1e43d7d97099656ea82aa780dcf697672255591d1c187bfb: Status 404 returned error can't find the container with id 1636aef1e87bac9c1e43d7d97099656ea82aa780dcf697672255591d1c187bfb Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.042674 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-1abe-account-create-update-74trh"] Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.055577 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-d90b-account-create-update-lklt7"] Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.323667 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.339968 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.345831 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7ff57884d4-gkgfr"] Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.398204 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-7ff57884d4-gkgfr"] Nov 
28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.412810 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:17:50 crc kubenswrapper[4838]: E1128 10:17:50.413476 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5144a764-e665-486c-ba49-02edbd12cf0b" containerName="ceilometer-central-agent" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.413488 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="5144a764-e665-486c-ba49-02edbd12cf0b" containerName="ceilometer-central-agent" Nov 28 10:17:50 crc kubenswrapper[4838]: E1128 10:17:50.413497 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f301ff0-c619-4fb2-a1e6-a7254b39e13f" containerName="neutron-api" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.413504 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f301ff0-c619-4fb2-a1e6-a7254b39e13f" containerName="neutron-api" Nov 28 10:17:50 crc kubenswrapper[4838]: E1128 10:17:50.413523 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5144a764-e665-486c-ba49-02edbd12cf0b" containerName="proxy-httpd" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.413529 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="5144a764-e665-486c-ba49-02edbd12cf0b" containerName="proxy-httpd" Nov 28 10:17:50 crc kubenswrapper[4838]: E1128 10:17:50.413553 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f301ff0-c619-4fb2-a1e6-a7254b39e13f" containerName="neutron-httpd" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.413559 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f301ff0-c619-4fb2-a1e6-a7254b39e13f" containerName="neutron-httpd" Nov 28 10:17:50 crc kubenswrapper[4838]: E1128 10:17:50.413588 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5144a764-e665-486c-ba49-02edbd12cf0b" containerName="ceilometer-notification-agent" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.413594 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="5144a764-e665-486c-ba49-02edbd12cf0b" containerName="ceilometer-notification-agent" Nov 28 10:17:50 crc kubenswrapper[4838]: E1128 10:17:50.413610 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5144a764-e665-486c-ba49-02edbd12cf0b" containerName="sg-core" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.413615 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="5144a764-e665-486c-ba49-02edbd12cf0b" containerName="sg-core" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.413939 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f301ff0-c619-4fb2-a1e6-a7254b39e13f" containerName="neutron-api" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.413949 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="5144a764-e665-486c-ba49-02edbd12cf0b" containerName="ceilometer-notification-agent" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.414794 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f301ff0-c619-4fb2-a1e6-a7254b39e13f" containerName="neutron-httpd" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.414815 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="5144a764-e665-486c-ba49-02edbd12cf0b" containerName="sg-core" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.414834 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="5144a764-e665-486c-ba49-02edbd12cf0b" 
containerName="ceilometer-central-agent" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.414854 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="5144a764-e665-486c-ba49-02edbd12cf0b" containerName="proxy-httpd" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.417972 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.420658 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.420866 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.422949 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.526933 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.526993 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4ea6218f-637c-472d-83b3-1059c0d7e1f7-run-httpd\") pod \"ceilometer-0\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.527027 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4ea6218f-637c-472d-83b3-1059c0d7e1f7-log-httpd\") pod \"ceilometer-0\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.527084 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.527142 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgmsk\" (UniqueName: \"kubernetes.io/projected/4ea6218f-637c-472d-83b3-1059c0d7e1f7-kube-api-access-xgmsk\") pod \"ceilometer-0\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.527167 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-config-data\") pod \"ceilometer-0\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.527218 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-scripts\") pod \"ceilometer-0\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 
10:17:50.581843 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5144a764-e665-486c-ba49-02edbd12cf0b" path="/var/lib/kubelet/pods/5144a764-e665-486c-ba49-02edbd12cf0b/volumes" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.582690 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f301ff0-c619-4fb2-a1e6-a7254b39e13f" path="/var/lib/kubelet/pods/9f301ff0-c619-4fb2-a1e6-a7254b39e13f/volumes" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.628325 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.628378 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgmsk\" (UniqueName: \"kubernetes.io/projected/4ea6218f-637c-472d-83b3-1059c0d7e1f7-kube-api-access-xgmsk\") pod \"ceilometer-0\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.628398 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-config-data\") pod \"ceilometer-0\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.628482 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-scripts\") pod \"ceilometer-0\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.628567 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.628594 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4ea6218f-637c-472d-83b3-1059c0d7e1f7-run-httpd\") pod \"ceilometer-0\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.628627 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4ea6218f-637c-472d-83b3-1059c0d7e1f7-log-httpd\") pod \"ceilometer-0\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.629021 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4ea6218f-637c-472d-83b3-1059c0d7e1f7-log-httpd\") pod \"ceilometer-0\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.632130 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4ea6218f-637c-472d-83b3-1059c0d7e1f7-run-httpd\") pod \"ceilometer-0\" (UID: 
\"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.638606 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.640302 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.640660 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-config-data\") pod \"ceilometer-0\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.643347 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-scripts\") pod \"ceilometer-0\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.653536 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgmsk\" (UniqueName: \"kubernetes.io/projected/4ea6218f-637c-472d-83b3-1059c0d7e1f7-kube-api-access-xgmsk\") pod \"ceilometer-0\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.731986 4838 generic.go:334] "Generic (PLEG): container finished" podID="0a796e35-158d-43c9-a806-92dda81c78f2" containerID="f503451f64a3d6ce7301f5078be9807294088199d72366ea053d013481055e92" exitCode=0 Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.732080 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-5rgrd" event={"ID":"0a796e35-158d-43c9-a806-92dda81c78f2","Type":"ContainerDied","Data":"f503451f64a3d6ce7301f5078be9807294088199d72366ea053d013481055e92"} Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.737037 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-cfc5-account-create-update-cfjq4" event={"ID":"6f84685f-a9f2-45b4-af58-fb0218060369","Type":"ContainerStarted","Data":"8c43108d54dac7881693c1f429052dea6f2cd9b16d9ba8706bd53ed601704e96"} Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.737093 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-cfc5-account-create-update-cfjq4" event={"ID":"6f84685f-a9f2-45b4-af58-fb0218060369","Type":"ContainerStarted","Data":"399003d531d05ccae7bd17753b9980d95cbee13c15c33703bc2cebe8d5470f8a"} Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.739344 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-d90b-account-create-update-lklt7" event={"ID":"3af8b444-f619-4a2c-bfcc-1dbb7966eb62","Type":"ContainerStarted","Data":"e267b5e3e37639284b8d19984fefe7e01c4f917c413b0bad084bdfb7db50b0a2"} Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.739371 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-d90b-account-create-update-lklt7" 
event={"ID":"3af8b444-f619-4a2c-bfcc-1dbb7966eb62","Type":"ContainerStarted","Data":"7a10b5ffdf2832379e1cc987ec1f7832db075aa3437096af6a54a9b776b3f9e8"} Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.743042 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-1abe-account-create-update-74trh" event={"ID":"2d048538-8370-4422-89b8-1f4733ae72b1","Type":"ContainerStarted","Data":"e4931c9a00687c16d0a181006a54c48ef31e57bde057b26cf3de5d3c31fe1f74"} Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.743096 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-1abe-account-create-update-74trh" event={"ID":"2d048538-8370-4422-89b8-1f4733ae72b1","Type":"ContainerStarted","Data":"2c5557df43ef8d54500c182a96a288588f90492c82ec7ea7d784017b1400d4ce"} Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.745320 4838 generic.go:334] "Generic (PLEG): container finished" podID="ed9862ca-36d5-4ece-9aa7-4ed71b713e15" containerID="8ea3d7f756744baeaf8a841103f3055446d42e8273f27480a363f2c30d253dd8" exitCode=0 Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.745431 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-h4wpj" event={"ID":"ed9862ca-36d5-4ece-9aa7-4ed71b713e15","Type":"ContainerDied","Data":"8ea3d7f756744baeaf8a841103f3055446d42e8273f27480a363f2c30d253dd8"} Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.748014 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-r7rf2" event={"ID":"2c180cf9-7d38-4e43-9723-1fa20242ff56","Type":"ContainerStarted","Data":"4f0a73c500f34a37642c1fabed5297418f5aee5797a32290e0851d0c8cf90373"} Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.748052 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-r7rf2" event={"ID":"2c180cf9-7d38-4e43-9723-1fa20242ff56","Type":"ContainerStarted","Data":"1636aef1e87bac9c1e43d7d97099656ea82aa780dcf697672255591d1c187bfb"} Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.757844 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.769005 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-1abe-account-create-update-74trh" podStartSLOduration=6.768982764 podStartE2EDuration="6.768982764s" podCreationTimestamp="2025-11-28 10:17:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:17:50.767330521 +0000 UTC m=+1242.466304691" watchObservedRunningTime="2025-11-28 10:17:50.768982764 +0000 UTC m=+1242.467956934" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.824481 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-d90b-account-create-update-lklt7" podStartSLOduration=6.82445963 podStartE2EDuration="6.82445963s" podCreationTimestamp="2025-11-28 10:17:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:17:50.795692295 +0000 UTC m=+1242.494666465" watchObservedRunningTime="2025-11-28 10:17:50.82445963 +0000 UTC m=+1242.523433790" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.859405 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-r7rf2" podStartSLOduration=6.859386961 podStartE2EDuration="6.859386961s" podCreationTimestamp="2025-11-28 10:17:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:17:50.837584374 +0000 UTC m=+1242.536558544" watchObservedRunningTime="2025-11-28 10:17:50.859386961 +0000 UTC m=+1242.558361131" Nov 28 10:17:50 crc kubenswrapper[4838]: I1128 10:17:50.882803 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-cfc5-account-create-update-cfjq4" podStartSLOduration=6.882786252 podStartE2EDuration="6.882786252s" podCreationTimestamp="2025-11-28 10:17:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:17:50.873177492 +0000 UTC m=+1242.572151662" watchObservedRunningTime="2025-11-28 10:17:50.882786252 +0000 UTC m=+1242.581760422" Nov 28 10:17:51 crc kubenswrapper[4838]: I1128 10:17:51.322998 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:17:51 crc kubenswrapper[4838]: I1128 10:17:51.756419 4838 generic.go:334] "Generic (PLEG): container finished" podID="6f84685f-a9f2-45b4-af58-fb0218060369" containerID="8c43108d54dac7881693c1f429052dea6f2cd9b16d9ba8706bd53ed601704e96" exitCode=0 Nov 28 10:17:51 crc kubenswrapper[4838]: I1128 10:17:51.756491 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-cfc5-account-create-update-cfjq4" event={"ID":"6f84685f-a9f2-45b4-af58-fb0218060369","Type":"ContainerDied","Data":"8c43108d54dac7881693c1f429052dea6f2cd9b16d9ba8706bd53ed601704e96"} Nov 28 10:17:51 crc kubenswrapper[4838]: I1128 10:17:51.758167 4838 generic.go:334] "Generic (PLEG): container finished" podID="3af8b444-f619-4a2c-bfcc-1dbb7966eb62" containerID="e267b5e3e37639284b8d19984fefe7e01c4f917c413b0bad084bdfb7db50b0a2" exitCode=0 Nov 28 10:17:51 crc kubenswrapper[4838]: I1128 10:17:51.758213 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-d90b-account-create-update-lklt7" 
event={"ID":"3af8b444-f619-4a2c-bfcc-1dbb7966eb62","Type":"ContainerDied","Data":"e267b5e3e37639284b8d19984fefe7e01c4f917c413b0bad084bdfb7db50b0a2"} Nov 28 10:17:51 crc kubenswrapper[4838]: I1128 10:17:51.759868 4838 generic.go:334] "Generic (PLEG): container finished" podID="2d048538-8370-4422-89b8-1f4733ae72b1" containerID="e4931c9a00687c16d0a181006a54c48ef31e57bde057b26cf3de5d3c31fe1f74" exitCode=0 Nov 28 10:17:51 crc kubenswrapper[4838]: I1128 10:17:51.759920 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-1abe-account-create-update-74trh" event={"ID":"2d048538-8370-4422-89b8-1f4733ae72b1","Type":"ContainerDied","Data":"e4931c9a00687c16d0a181006a54c48ef31e57bde057b26cf3de5d3c31fe1f74"} Nov 28 10:17:51 crc kubenswrapper[4838]: I1128 10:17:51.761058 4838 generic.go:334] "Generic (PLEG): container finished" podID="2c180cf9-7d38-4e43-9723-1fa20242ff56" containerID="4f0a73c500f34a37642c1fabed5297418f5aee5797a32290e0851d0c8cf90373" exitCode=0 Nov 28 10:17:51 crc kubenswrapper[4838]: I1128 10:17:51.761129 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-r7rf2" event={"ID":"2c180cf9-7d38-4e43-9723-1fa20242ff56","Type":"ContainerDied","Data":"4f0a73c500f34a37642c1fabed5297418f5aee5797a32290e0851d0c8cf90373"} Nov 28 10:17:51 crc kubenswrapper[4838]: I1128 10:17:51.762485 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4ea6218f-637c-472d-83b3-1059c0d7e1f7","Type":"ContainerStarted","Data":"6f8cf39af595d2f2d93e78a28da3f707d9adf7db218a03ea2d337473f6695ec9"} Nov 28 10:17:52 crc kubenswrapper[4838]: I1128 10:17:52.203794 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-5rgrd" Nov 28 10:17:52 crc kubenswrapper[4838]: I1128 10:17:52.208440 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-h4wpj" Nov 28 10:17:52 crc kubenswrapper[4838]: I1128 10:17:52.363607 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-szgmx\" (UniqueName: \"kubernetes.io/projected/ed9862ca-36d5-4ece-9aa7-4ed71b713e15-kube-api-access-szgmx\") pod \"ed9862ca-36d5-4ece-9aa7-4ed71b713e15\" (UID: \"ed9862ca-36d5-4ece-9aa7-4ed71b713e15\") " Nov 28 10:17:52 crc kubenswrapper[4838]: I1128 10:17:52.363668 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed9862ca-36d5-4ece-9aa7-4ed71b713e15-operator-scripts\") pod \"ed9862ca-36d5-4ece-9aa7-4ed71b713e15\" (UID: \"ed9862ca-36d5-4ece-9aa7-4ed71b713e15\") " Nov 28 10:17:52 crc kubenswrapper[4838]: I1128 10:17:52.363701 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a796e35-158d-43c9-a806-92dda81c78f2-operator-scripts\") pod \"0a796e35-158d-43c9-a806-92dda81c78f2\" (UID: \"0a796e35-158d-43c9-a806-92dda81c78f2\") " Nov 28 10:17:52 crc kubenswrapper[4838]: I1128 10:17:52.363839 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fmxxt\" (UniqueName: \"kubernetes.io/projected/0a796e35-158d-43c9-a806-92dda81c78f2-kube-api-access-fmxxt\") pod \"0a796e35-158d-43c9-a806-92dda81c78f2\" (UID: \"0a796e35-158d-43c9-a806-92dda81c78f2\") " Nov 28 10:17:52 crc kubenswrapper[4838]: I1128 10:17:52.364528 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a796e35-158d-43c9-a806-92dda81c78f2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0a796e35-158d-43c9-a806-92dda81c78f2" (UID: "0a796e35-158d-43c9-a806-92dda81c78f2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:17:52 crc kubenswrapper[4838]: I1128 10:17:52.364586 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed9862ca-36d5-4ece-9aa7-4ed71b713e15-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ed9862ca-36d5-4ece-9aa7-4ed71b713e15" (UID: "ed9862ca-36d5-4ece-9aa7-4ed71b713e15"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:17:52 crc kubenswrapper[4838]: I1128 10:17:52.368483 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a796e35-158d-43c9-a806-92dda81c78f2-kube-api-access-fmxxt" (OuterVolumeSpecName: "kube-api-access-fmxxt") pod "0a796e35-158d-43c9-a806-92dda81c78f2" (UID: "0a796e35-158d-43c9-a806-92dda81c78f2"). InnerVolumeSpecName "kube-api-access-fmxxt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:17:52 crc kubenswrapper[4838]: I1128 10:17:52.369208 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed9862ca-36d5-4ece-9aa7-4ed71b713e15-kube-api-access-szgmx" (OuterVolumeSpecName: "kube-api-access-szgmx") pod "ed9862ca-36d5-4ece-9aa7-4ed71b713e15" (UID: "ed9862ca-36d5-4ece-9aa7-4ed71b713e15"). InnerVolumeSpecName "kube-api-access-szgmx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:17:52 crc kubenswrapper[4838]: I1128 10:17:52.466841 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-szgmx\" (UniqueName: \"kubernetes.io/projected/ed9862ca-36d5-4ece-9aa7-4ed71b713e15-kube-api-access-szgmx\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:52 crc kubenswrapper[4838]: I1128 10:17:52.467218 4838 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed9862ca-36d5-4ece-9aa7-4ed71b713e15-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:52 crc kubenswrapper[4838]: I1128 10:17:52.467240 4838 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a796e35-158d-43c9-a806-92dda81c78f2-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:52 crc kubenswrapper[4838]: I1128 10:17:52.467258 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fmxxt\" (UniqueName: \"kubernetes.io/projected/0a796e35-158d-43c9-a806-92dda81c78f2-kube-api-access-fmxxt\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:52 crc kubenswrapper[4838]: I1128 10:17:52.655113 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:17:52 crc kubenswrapper[4838]: I1128 10:17:52.777783 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-5rgrd" Nov 28 10:17:52 crc kubenswrapper[4838]: I1128 10:17:52.777845 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-5rgrd" event={"ID":"0a796e35-158d-43c9-a806-92dda81c78f2","Type":"ContainerDied","Data":"50d8d1877e728f8d06ae4204dfa01f7f3fafda04671348669785475bb9139f9d"} Nov 28 10:17:52 crc kubenswrapper[4838]: I1128 10:17:52.777913 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="50d8d1877e728f8d06ae4204dfa01f7f3fafda04671348669785475bb9139f9d" Nov 28 10:17:52 crc kubenswrapper[4838]: I1128 10:17:52.781162 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-h4wpj" event={"ID":"ed9862ca-36d5-4ece-9aa7-4ed71b713e15","Type":"ContainerDied","Data":"24f56dd2617aeab0001876d5b1e7292fcfe34b1171a023507b05323d844449f4"} Nov 28 10:17:52 crc kubenswrapper[4838]: I1128 10:17:52.781187 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="24f56dd2617aeab0001876d5b1e7292fcfe34b1171a023507b05323d844449f4" Nov 28 10:17:52 crc kubenswrapper[4838]: I1128 10:17:52.781227 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-h4wpj" Nov 28 10:17:52 crc kubenswrapper[4838]: I1128 10:17:52.783150 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4ea6218f-637c-472d-83b3-1059c0d7e1f7","Type":"ContainerStarted","Data":"c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd"} Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.189238 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-1abe-account-create-update-74trh" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.284488 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqw5x\" (UniqueName: \"kubernetes.io/projected/2d048538-8370-4422-89b8-1f4733ae72b1-kube-api-access-bqw5x\") pod \"2d048538-8370-4422-89b8-1f4733ae72b1\" (UID: \"2d048538-8370-4422-89b8-1f4733ae72b1\") " Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.284633 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d048538-8370-4422-89b8-1f4733ae72b1-operator-scripts\") pod \"2d048538-8370-4422-89b8-1f4733ae72b1\" (UID: \"2d048538-8370-4422-89b8-1f4733ae72b1\") " Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.285937 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d048538-8370-4422-89b8-1f4733ae72b1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2d048538-8370-4422-89b8-1f4733ae72b1" (UID: "2d048538-8370-4422-89b8-1f4733ae72b1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.292799 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d048538-8370-4422-89b8-1f4733ae72b1-kube-api-access-bqw5x" (OuterVolumeSpecName: "kube-api-access-bqw5x") pod "2d048538-8370-4422-89b8-1f4733ae72b1" (UID: "2d048538-8370-4422-89b8-1f4733ae72b1"). InnerVolumeSpecName "kube-api-access-bqw5x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.391790 4838 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d048538-8370-4422-89b8-1f4733ae72b1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.391835 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqw5x\" (UniqueName: \"kubernetes.io/projected/2d048538-8370-4422-89b8-1f4733ae72b1-kube-api-access-bqw5x\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.395035 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-d90b-account-create-update-lklt7" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.404185 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-r7rf2" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.411151 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-cfc5-account-create-update-cfjq4" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.494184 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3af8b444-f619-4a2c-bfcc-1dbb7966eb62-operator-scripts\") pod \"3af8b444-f619-4a2c-bfcc-1dbb7966eb62\" (UID: \"3af8b444-f619-4a2c-bfcc-1dbb7966eb62\") " Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.494277 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c180cf9-7d38-4e43-9723-1fa20242ff56-operator-scripts\") pod \"2c180cf9-7d38-4e43-9723-1fa20242ff56\" (UID: \"2c180cf9-7d38-4e43-9723-1fa20242ff56\") " Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.494329 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f84685f-a9f2-45b4-af58-fb0218060369-operator-scripts\") pod \"6f84685f-a9f2-45b4-af58-fb0218060369\" (UID: \"6f84685f-a9f2-45b4-af58-fb0218060369\") " Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.494413 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs76r\" (UniqueName: \"kubernetes.io/projected/3af8b444-f619-4a2c-bfcc-1dbb7966eb62-kube-api-access-qs76r\") pod \"3af8b444-f619-4a2c-bfcc-1dbb7966eb62\" (UID: \"3af8b444-f619-4a2c-bfcc-1dbb7966eb62\") " Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.494450 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cpftl\" (UniqueName: \"kubernetes.io/projected/6f84685f-a9f2-45b4-af58-fb0218060369-kube-api-access-cpftl\") pod \"6f84685f-a9f2-45b4-af58-fb0218060369\" (UID: \"6f84685f-a9f2-45b4-af58-fb0218060369\") " Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.494476 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z562h\" (UniqueName: \"kubernetes.io/projected/2c180cf9-7d38-4e43-9723-1fa20242ff56-kube-api-access-z562h\") pod \"2c180cf9-7d38-4e43-9723-1fa20242ff56\" (UID: \"2c180cf9-7d38-4e43-9723-1fa20242ff56\") " Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.495371 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f84685f-a9f2-45b4-af58-fb0218060369-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6f84685f-a9f2-45b4-af58-fb0218060369" (UID: "6f84685f-a9f2-45b4-af58-fb0218060369"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.495953 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c180cf9-7d38-4e43-9723-1fa20242ff56-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2c180cf9-7d38-4e43-9723-1fa20242ff56" (UID: "2c180cf9-7d38-4e43-9723-1fa20242ff56"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.496956 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3af8b444-f619-4a2c-bfcc-1dbb7966eb62-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3af8b444-f619-4a2c-bfcc-1dbb7966eb62" (UID: "3af8b444-f619-4a2c-bfcc-1dbb7966eb62"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.500947 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c180cf9-7d38-4e43-9723-1fa20242ff56-kube-api-access-z562h" (OuterVolumeSpecName: "kube-api-access-z562h") pod "2c180cf9-7d38-4e43-9723-1fa20242ff56" (UID: "2c180cf9-7d38-4e43-9723-1fa20242ff56"). InnerVolumeSpecName "kube-api-access-z562h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.505288 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3af8b444-f619-4a2c-bfcc-1dbb7966eb62-kube-api-access-qs76r" (OuterVolumeSpecName: "kube-api-access-qs76r") pod "3af8b444-f619-4a2c-bfcc-1dbb7966eb62" (UID: "3af8b444-f619-4a2c-bfcc-1dbb7966eb62"). InnerVolumeSpecName "kube-api-access-qs76r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.506014 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f84685f-a9f2-45b4-af58-fb0218060369-kube-api-access-cpftl" (OuterVolumeSpecName: "kube-api-access-cpftl") pod "6f84685f-a9f2-45b4-af58-fb0218060369" (UID: "6f84685f-a9f2-45b4-af58-fb0218060369"). InnerVolumeSpecName "kube-api-access-cpftl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.596397 4838 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f84685f-a9f2-45b4-af58-fb0218060369-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.596435 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs76r\" (UniqueName: \"kubernetes.io/projected/3af8b444-f619-4a2c-bfcc-1dbb7966eb62-kube-api-access-qs76r\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.596447 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cpftl\" (UniqueName: \"kubernetes.io/projected/6f84685f-a9f2-45b4-af58-fb0218060369-kube-api-access-cpftl\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.596456 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z562h\" (UniqueName: \"kubernetes.io/projected/2c180cf9-7d38-4e43-9723-1fa20242ff56-kube-api-access-z562h\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.596464 4838 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3af8b444-f619-4a2c-bfcc-1dbb7966eb62-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.596474 4838 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c180cf9-7d38-4e43-9723-1fa20242ff56-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.801461 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-cfc5-account-create-update-cfjq4" event={"ID":"6f84685f-a9f2-45b4-af58-fb0218060369","Type":"ContainerDied","Data":"399003d531d05ccae7bd17753b9980d95cbee13c15c33703bc2cebe8d5470f8a"} Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.801523 4838 pod_container_deletor.go:80] "Container not found in 
pod's containers" containerID="399003d531d05ccae7bd17753b9980d95cbee13c15c33703bc2cebe8d5470f8a" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.801483 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-cfc5-account-create-update-cfjq4" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.804073 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-d90b-account-create-update-lklt7" event={"ID":"3af8b444-f619-4a2c-bfcc-1dbb7966eb62","Type":"ContainerDied","Data":"7a10b5ffdf2832379e1cc987ec1f7832db075aa3437096af6a54a9b776b3f9e8"} Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.804102 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-d90b-account-create-update-lklt7" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.804114 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7a10b5ffdf2832379e1cc987ec1f7832db075aa3437096af6a54a9b776b3f9e8" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.805451 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-1abe-account-create-update-74trh" event={"ID":"2d048538-8370-4422-89b8-1f4733ae72b1","Type":"ContainerDied","Data":"2c5557df43ef8d54500c182a96a288588f90492c82ec7ea7d784017b1400d4ce"} Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.805489 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2c5557df43ef8d54500c182a96a288588f90492c82ec7ea7d784017b1400d4ce" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.805499 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-1abe-account-create-update-74trh" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.807040 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-r7rf2" event={"ID":"2c180cf9-7d38-4e43-9723-1fa20242ff56","Type":"ContainerDied","Data":"1636aef1e87bac9c1e43d7d97099656ea82aa780dcf697672255591d1c187bfb"} Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.807080 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1636aef1e87bac9c1e43d7d97099656ea82aa780dcf697672255591d1c187bfb" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.807138 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-r7rf2" Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.823184 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4ea6218f-637c-472d-83b3-1059c0d7e1f7","Type":"ContainerStarted","Data":"8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881"} Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.939867 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:17:53 crc kubenswrapper[4838]: I1128 10:17:53.939967 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:17:55 crc kubenswrapper[4838]: I1128 10:17:55.840201 4838 generic.go:334] "Generic (PLEG): container finished" podID="f189d761-b514-47b6-8d4f-6111e883e2b4" containerID="db7e3e258c1bc1f060a47489f3d80a15c7d89125acea1610151e4d65ccd67c02" exitCode=137 Nov 28 10:17:55 crc kubenswrapper[4838]: I1128 10:17:55.840237 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f189d761-b514-47b6-8d4f-6111e883e2b4","Type":"ContainerDied","Data":"db7e3e258c1bc1f060a47489f3d80a15c7d89125acea1610151e4d65ccd67c02"} Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.499507 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.652709 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6lcb\" (UniqueName: \"kubernetes.io/projected/f189d761-b514-47b6-8d4f-6111e883e2b4-kube-api-access-c6lcb\") pod \"f189d761-b514-47b6-8d4f-6111e883e2b4\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.653114 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-config-data-custom\") pod \"f189d761-b514-47b6-8d4f-6111e883e2b4\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.653654 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-scripts\") pod \"f189d761-b514-47b6-8d4f-6111e883e2b4\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.653727 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-combined-ca-bundle\") pod \"f189d761-b514-47b6-8d4f-6111e883e2b4\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.653750 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f189d761-b514-47b6-8d4f-6111e883e2b4-etc-machine-id\") pod \"f189d761-b514-47b6-8d4f-6111e883e2b4\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.653810 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f189d761-b514-47b6-8d4f-6111e883e2b4-logs\") pod \"f189d761-b514-47b6-8d4f-6111e883e2b4\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.653845 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-config-data\") pod \"f189d761-b514-47b6-8d4f-6111e883e2b4\" (UID: \"f189d761-b514-47b6-8d4f-6111e883e2b4\") " Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.655039 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f189d761-b514-47b6-8d4f-6111e883e2b4-logs" (OuterVolumeSpecName: "logs") pod "f189d761-b514-47b6-8d4f-6111e883e2b4" (UID: "f189d761-b514-47b6-8d4f-6111e883e2b4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.655084 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f189d761-b514-47b6-8d4f-6111e883e2b4-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "f189d761-b514-47b6-8d4f-6111e883e2b4" (UID: "f189d761-b514-47b6-8d4f-6111e883e2b4"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.658865 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f189d761-b514-47b6-8d4f-6111e883e2b4-kube-api-access-c6lcb" (OuterVolumeSpecName: "kube-api-access-c6lcb") pod "f189d761-b514-47b6-8d4f-6111e883e2b4" (UID: "f189d761-b514-47b6-8d4f-6111e883e2b4"). InnerVolumeSpecName "kube-api-access-c6lcb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.660421 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-scripts" (OuterVolumeSpecName: "scripts") pod "f189d761-b514-47b6-8d4f-6111e883e2b4" (UID: "f189d761-b514-47b6-8d4f-6111e883e2b4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.660825 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f189d761-b514-47b6-8d4f-6111e883e2b4" (UID: "f189d761-b514-47b6-8d4f-6111e883e2b4"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.681341 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f189d761-b514-47b6-8d4f-6111e883e2b4" (UID: "f189d761-b514-47b6-8d4f-6111e883e2b4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.723268 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-config-data" (OuterVolumeSpecName: "config-data") pod "f189d761-b514-47b6-8d4f-6111e883e2b4" (UID: "f189d761-b514-47b6-8d4f-6111e883e2b4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.755577 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6lcb\" (UniqueName: \"kubernetes.io/projected/f189d761-b514-47b6-8d4f-6111e883e2b4-kube-api-access-c6lcb\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.755806 4838 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.755885 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.755950 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.756044 4838 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f189d761-b514-47b6-8d4f-6111e883e2b4-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.756108 4838 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f189d761-b514-47b6-8d4f-6111e883e2b4-logs\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.756171 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f189d761-b514-47b6-8d4f-6111e883e2b4-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.866609 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4ea6218f-637c-472d-83b3-1059c0d7e1f7","Type":"ContainerStarted","Data":"af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe"} Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.870901 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f189d761-b514-47b6-8d4f-6111e883e2b4","Type":"ContainerDied","Data":"8480e56fc6f25f1fe2548dd39c0a971ee81f97918f5586f3037042a1f1aa6915"} Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.870949 4838 scope.go:117] "RemoveContainer" containerID="db7e3e258c1bc1f060a47489f3d80a15c7d89125acea1610151e4d65ccd67c02" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.871143 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.893195 4838 scope.go:117] "RemoveContainer" containerID="b7b4742e4b6dcaf1915f7ea15b57a3f45776c52712002f2e8068241d3e40c491" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.922326 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.932939 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.943420 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 28 10:17:56 crc kubenswrapper[4838]: E1128 10:17:56.943980 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f189d761-b514-47b6-8d4f-6111e883e2b4" containerName="cinder-api" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.944003 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="f189d761-b514-47b6-8d4f-6111e883e2b4" containerName="cinder-api" Nov 28 10:17:56 crc kubenswrapper[4838]: E1128 10:17:56.944022 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f189d761-b514-47b6-8d4f-6111e883e2b4" containerName="cinder-api-log" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.944029 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="f189d761-b514-47b6-8d4f-6111e883e2b4" containerName="cinder-api-log" Nov 28 10:17:56 crc kubenswrapper[4838]: E1128 10:17:56.944041 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f84685f-a9f2-45b4-af58-fb0218060369" containerName="mariadb-account-create-update" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.944049 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f84685f-a9f2-45b4-af58-fb0218060369" containerName="mariadb-account-create-update" Nov 28 10:17:56 crc kubenswrapper[4838]: E1128 10:17:56.944071 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3af8b444-f619-4a2c-bfcc-1dbb7966eb62" containerName="mariadb-account-create-update" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.944079 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="3af8b444-f619-4a2c-bfcc-1dbb7966eb62" containerName="mariadb-account-create-update" Nov 28 10:17:56 crc kubenswrapper[4838]: E1128 10:17:56.944097 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a796e35-158d-43c9-a806-92dda81c78f2" containerName="mariadb-database-create" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.944104 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a796e35-158d-43c9-a806-92dda81c78f2" containerName="mariadb-database-create" Nov 28 10:17:56 crc kubenswrapper[4838]: E1128 10:17:56.944115 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c180cf9-7d38-4e43-9723-1fa20242ff56" containerName="mariadb-database-create" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.944123 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c180cf9-7d38-4e43-9723-1fa20242ff56" containerName="mariadb-database-create" Nov 28 10:17:56 crc kubenswrapper[4838]: E1128 10:17:56.944133 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed9862ca-36d5-4ece-9aa7-4ed71b713e15" containerName="mariadb-database-create" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.944141 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed9862ca-36d5-4ece-9aa7-4ed71b713e15" containerName="mariadb-database-create" Nov 28 
10:17:56 crc kubenswrapper[4838]: E1128 10:17:56.944157 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d048538-8370-4422-89b8-1f4733ae72b1" containerName="mariadb-account-create-update" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.944175 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d048538-8370-4422-89b8-1f4733ae72b1" containerName="mariadb-account-create-update" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.944396 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f84685f-a9f2-45b4-af58-fb0218060369" containerName="mariadb-account-create-update" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.944421 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="f189d761-b514-47b6-8d4f-6111e883e2b4" containerName="cinder-api" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.944436 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d048538-8370-4422-89b8-1f4733ae72b1" containerName="mariadb-account-create-update" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.944446 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c180cf9-7d38-4e43-9723-1fa20242ff56" containerName="mariadb-database-create" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.944460 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed9862ca-36d5-4ece-9aa7-4ed71b713e15" containerName="mariadb-database-create" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.944474 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="f189d761-b514-47b6-8d4f-6111e883e2b4" containerName="cinder-api-log" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.944491 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a796e35-158d-43c9-a806-92dda81c78f2" containerName="mariadb-database-create" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.944500 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="3af8b444-f619-4a2c-bfcc-1dbb7966eb62" containerName="mariadb-account-create-update" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.945733 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.948610 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.951327 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.951931 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 28 10:17:56 crc kubenswrapper[4838]: I1128 10:17:56.952059 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.066059 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rctz9\" (UniqueName: \"kubernetes.io/projected/e94e5f12-61ab-40e8-97ce-dc6f3c706583-kube-api-access-rctz9\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.066162 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e94e5f12-61ab-40e8-97ce-dc6f3c706583-logs\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.066210 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e94e5f12-61ab-40e8-97ce-dc6f3c706583-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.066242 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e94e5f12-61ab-40e8-97ce-dc6f3c706583-config-data-custom\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.066269 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e94e5f12-61ab-40e8-97ce-dc6f3c706583-config-data\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.066312 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e94e5f12-61ab-40e8-97ce-dc6f3c706583-etc-machine-id\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.066374 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e94e5f12-61ab-40e8-97ce-dc6f3c706583-public-tls-certs\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.066521 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/e94e5f12-61ab-40e8-97ce-dc6f3c706583-scripts\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.066579 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e94e5f12-61ab-40e8-97ce-dc6f3c706583-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.168275 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e94e5f12-61ab-40e8-97ce-dc6f3c706583-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.168391 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rctz9\" (UniqueName: \"kubernetes.io/projected/e94e5f12-61ab-40e8-97ce-dc6f3c706583-kube-api-access-rctz9\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.168477 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e94e5f12-61ab-40e8-97ce-dc6f3c706583-logs\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.168516 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e94e5f12-61ab-40e8-97ce-dc6f3c706583-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.168556 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e94e5f12-61ab-40e8-97ce-dc6f3c706583-config-data-custom\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.168587 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e94e5f12-61ab-40e8-97ce-dc6f3c706583-config-data\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.168641 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e94e5f12-61ab-40e8-97ce-dc6f3c706583-etc-machine-id\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.168738 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e94e5f12-61ab-40e8-97ce-dc6f3c706583-public-tls-certs\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.168794 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"scripts\" (UniqueName: \"kubernetes.io/secret/e94e5f12-61ab-40e8-97ce-dc6f3c706583-scripts\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.169181 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e94e5f12-61ab-40e8-97ce-dc6f3c706583-etc-machine-id\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.169639 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e94e5f12-61ab-40e8-97ce-dc6f3c706583-logs\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.173606 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e94e5f12-61ab-40e8-97ce-dc6f3c706583-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.173659 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e94e5f12-61ab-40e8-97ce-dc6f3c706583-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.174022 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e94e5f12-61ab-40e8-97ce-dc6f3c706583-scripts\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.174613 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e94e5f12-61ab-40e8-97ce-dc6f3c706583-config-data-custom\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.175109 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e94e5f12-61ab-40e8-97ce-dc6f3c706583-public-tls-certs\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.183844 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e94e5f12-61ab-40e8-97ce-dc6f3c706583-config-data\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.187768 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rctz9\" (UniqueName: \"kubernetes.io/projected/e94e5f12-61ab-40e8-97ce-dc6f3c706583-kube-api-access-rctz9\") pod \"cinder-api-0\" (UID: \"e94e5f12-61ab-40e8-97ce-dc6f3c706583\") " pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.294035 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.809419 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 10:17:57 crc kubenswrapper[4838]: W1128 10:17:57.816223 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode94e5f12_61ab_40e8_97ce_dc6f3c706583.slice/crio-be3248990a15ec38ce90d490402b0512ba21cb1d4f856830c2502ffad27d7c4b WatchSource:0}: Error finding container be3248990a15ec38ce90d490402b0512ba21cb1d4f856830c2502ffad27d7c4b: Status 404 returned error can't find the container with id be3248990a15ec38ce90d490402b0512ba21cb1d4f856830c2502ffad27d7c4b Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.898163 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4ea6218f-637c-472d-83b3-1059c0d7e1f7","Type":"ContainerStarted","Data":"d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3"} Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.899129 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.898301 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4ea6218f-637c-472d-83b3-1059c0d7e1f7" containerName="sg-core" containerID="cri-o://af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe" gracePeriod=30 Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.898327 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4ea6218f-637c-472d-83b3-1059c0d7e1f7" containerName="proxy-httpd" containerID="cri-o://d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3" gracePeriod=30 Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.898329 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4ea6218f-637c-472d-83b3-1059c0d7e1f7" containerName="ceilometer-notification-agent" containerID="cri-o://8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881" gracePeriod=30 Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.898272 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4ea6218f-637c-472d-83b3-1059c0d7e1f7" containerName="ceilometer-central-agent" containerID="cri-o://c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd" gracePeriod=30 Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.905350 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"e94e5f12-61ab-40e8-97ce-dc6f3c706583","Type":"ContainerStarted","Data":"be3248990a15ec38ce90d490402b0512ba21cb1d4f856830c2502ffad27d7c4b"} Nov 28 10:17:57 crc kubenswrapper[4838]: I1128 10:17:57.940072 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.8121494390000001 podStartE2EDuration="7.940055007s" podCreationTimestamp="2025-11-28 10:17:50 +0000 UTC" firstStartedPulling="2025-11-28 10:17:51.325341609 +0000 UTC m=+1243.024315819" lastFinishedPulling="2025-11-28 10:17:57.453247217 +0000 UTC m=+1249.152221387" observedRunningTime="2025-11-28 10:17:57.931177458 +0000 UTC m=+1249.630151628" watchObservedRunningTime="2025-11-28 10:17:57.940055007 +0000 UTC m=+1249.639029177" Nov 28 10:17:58 crc 
kubenswrapper[4838]: I1128 10:17:58.600014 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f189d761-b514-47b6-8d4f-6111e883e2b4" path="/var/lib/kubelet/pods/f189d761-b514-47b6-8d4f-6111e883e2b4/volumes" Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.634878 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.801491 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4ea6218f-637c-472d-83b3-1059c0d7e1f7-log-httpd\") pod \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.801564 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-combined-ca-bundle\") pod \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.801636 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-config-data\") pod \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.801670 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-sg-core-conf-yaml\") pod \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.801687 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4ea6218f-637c-472d-83b3-1059c0d7e1f7-run-httpd\") pod \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.801775 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xgmsk\" (UniqueName: \"kubernetes.io/projected/4ea6218f-637c-472d-83b3-1059c0d7e1f7-kube-api-access-xgmsk\") pod \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.802170 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ea6218f-637c-472d-83b3-1059c0d7e1f7-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "4ea6218f-637c-472d-83b3-1059c0d7e1f7" (UID: "4ea6218f-637c-472d-83b3-1059c0d7e1f7"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.802225 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-scripts\") pod \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\" (UID: \"4ea6218f-637c-472d-83b3-1059c0d7e1f7\") " Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.802215 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ea6218f-637c-472d-83b3-1059c0d7e1f7-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "4ea6218f-637c-472d-83b3-1059c0d7e1f7" (UID: "4ea6218f-637c-472d-83b3-1059c0d7e1f7"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.802574 4838 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4ea6218f-637c-472d-83b3-1059c0d7e1f7-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.802586 4838 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4ea6218f-637c-472d-83b3-1059c0d7e1f7-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.809331 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-scripts" (OuterVolumeSpecName: "scripts") pod "4ea6218f-637c-472d-83b3-1059c0d7e1f7" (UID: "4ea6218f-637c-472d-83b3-1059c0d7e1f7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.809817 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ea6218f-637c-472d-83b3-1059c0d7e1f7-kube-api-access-xgmsk" (OuterVolumeSpecName: "kube-api-access-xgmsk") pod "4ea6218f-637c-472d-83b3-1059c0d7e1f7" (UID: "4ea6218f-637c-472d-83b3-1059c0d7e1f7"). InnerVolumeSpecName "kube-api-access-xgmsk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.842053 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "4ea6218f-637c-472d-83b3-1059c0d7e1f7" (UID: "4ea6218f-637c-472d-83b3-1059c0d7e1f7"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.894204 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4ea6218f-637c-472d-83b3-1059c0d7e1f7" (UID: "4ea6218f-637c-472d-83b3-1059c0d7e1f7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.904175 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.904211 4838 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.904224 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xgmsk\" (UniqueName: \"kubernetes.io/projected/4ea6218f-637c-472d-83b3-1059c0d7e1f7-kube-api-access-xgmsk\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.904238 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.908427 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-config-data" (OuterVolumeSpecName: "config-data") pod "4ea6218f-637c-472d-83b3-1059c0d7e1f7" (UID: "4ea6218f-637c-472d-83b3-1059c0d7e1f7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.923203 4838 generic.go:334] "Generic (PLEG): container finished" podID="4ea6218f-637c-472d-83b3-1059c0d7e1f7" containerID="d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3" exitCode=0 Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.923235 4838 generic.go:334] "Generic (PLEG): container finished" podID="4ea6218f-637c-472d-83b3-1059c0d7e1f7" containerID="af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe" exitCode=2 Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.923243 4838 generic.go:334] "Generic (PLEG): container finished" podID="4ea6218f-637c-472d-83b3-1059c0d7e1f7" containerID="8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881" exitCode=0 Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.923249 4838 generic.go:334] "Generic (PLEG): container finished" podID="4ea6218f-637c-472d-83b3-1059c0d7e1f7" containerID="c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd" exitCode=0 Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.923270 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.923542 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4ea6218f-637c-472d-83b3-1059c0d7e1f7","Type":"ContainerDied","Data":"d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3"} Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.923640 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4ea6218f-637c-472d-83b3-1059c0d7e1f7","Type":"ContainerDied","Data":"af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe"} Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.923695 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4ea6218f-637c-472d-83b3-1059c0d7e1f7","Type":"ContainerDied","Data":"8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881"} Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.923795 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4ea6218f-637c-472d-83b3-1059c0d7e1f7","Type":"ContainerDied","Data":"c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd"} Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.923851 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4ea6218f-637c-472d-83b3-1059c0d7e1f7","Type":"ContainerDied","Data":"6f8cf39af595d2f2d93e78a28da3f707d9adf7db218a03ea2d337473f6695ec9"} Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.923919 4838 scope.go:117] "RemoveContainer" containerID="d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3" Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.926259 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"e94e5f12-61ab-40e8-97ce-dc6f3c706583","Type":"ContainerStarted","Data":"0258a92c2ee46c1c054c7430ce37b3f603fd396535325577f45e7c9d97c5c924"} Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.958145 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.964605 4838 scope.go:117] "RemoveContainer" containerID="af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe" Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.972802 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:17:58 crc kubenswrapper[4838]: I1128 10:17:58.994884 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:17:59 crc kubenswrapper[4838]: E1128 10:17:59.008613 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ea6218f-637c-472d-83b3-1059c0d7e1f7" containerName="ceilometer-central-agent" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.008846 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ea6218f-637c-472d-83b3-1059c0d7e1f7" containerName="ceilometer-central-agent" Nov 28 10:17:59 crc kubenswrapper[4838]: E1128 10:17:59.008939 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ea6218f-637c-472d-83b3-1059c0d7e1f7" containerName="ceilometer-notification-agent" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.008989 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ea6218f-637c-472d-83b3-1059c0d7e1f7" containerName="ceilometer-notification-agent" Nov 28 10:17:59 crc kubenswrapper[4838]: E1128 10:17:59.009074 4838 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ea6218f-637c-472d-83b3-1059c0d7e1f7" containerName="proxy-httpd"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.009123 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ea6218f-637c-472d-83b3-1059c0d7e1f7" containerName="proxy-httpd"
Nov 28 10:17:59 crc kubenswrapper[4838]: E1128 10:17:59.009202 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ea6218f-637c-472d-83b3-1059c0d7e1f7" containerName="sg-core"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.009254 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ea6218f-637c-472d-83b3-1059c0d7e1f7" containerName="sg-core"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.009656 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ea6218f-637c-472d-83b3-1059c0d7e1f7" containerName="ceilometer-central-agent"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.009762 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ea6218f-637c-472d-83b3-1059c0d7e1f7" containerName="proxy-httpd"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.009866 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ea6218f-637c-472d-83b3-1059c0d7e1f7" containerName="ceilometer-notification-agent"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.010401 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ea6218f-637c-472d-83b3-1059c0d7e1f7" containerName="sg-core"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.008885 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ea6218f-637c-472d-83b3-1059c0d7e1f7-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.017955 4838 scope.go:117] "RemoveContainer" containerID="8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.023010 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.023186 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.032041 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.032054 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.064637 4838 scope.go:117] "RemoveContainer" containerID="c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.088630 4838 scope.go:117] "RemoveContainer" containerID="d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3"
Nov 28 10:17:59 crc kubenswrapper[4838]: E1128 10:17:59.089071 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3\": container with ID starting with d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3 not found: ID does not exist" containerID="d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.089120 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3"} err="failed to get container status \"d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3\": rpc error: code = NotFound desc = could not find container \"d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3\": container with ID starting with d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3 not found: ID does not exist"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.089149 4838 scope.go:117] "RemoveContainer" containerID="af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe"
Nov 28 10:17:59 crc kubenswrapper[4838]: E1128 10:17:59.089397 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe\": container with ID starting with af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe not found: ID does not exist" containerID="af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.089422 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe"} err="failed to get container status \"af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe\": rpc error: code = NotFound desc = could not find container \"af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe\": container with ID starting with af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe not found: ID does not exist"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.089437 4838 scope.go:117] "RemoveContainer" containerID="8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881"
Nov 28 10:17:59 crc kubenswrapper[4838]: E1128 10:17:59.089614 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881\": container with ID starting with 8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881 not found: ID does not exist" containerID="8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.089636 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881"} err="failed to get container status \"8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881\": rpc error: code = NotFound desc = could not find container \"8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881\": container with ID starting with 8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881 not found: ID does not exist"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.089648 4838 scope.go:117] "RemoveContainer" containerID="c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd"
Nov 28 10:17:59 crc kubenswrapper[4838]: E1128 10:17:59.089860 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd\": container with ID starting with c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd not found: ID does not exist" containerID="c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.089885 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd"} err="failed to get container status \"c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd\": rpc error: code = NotFound desc = could not find container \"c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd\": container with ID starting with c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd not found: ID does not exist"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.089899 4838 scope.go:117] "RemoveContainer" containerID="d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.090873 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3"} err="failed to get container status \"d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3\": rpc error: code = NotFound desc = could not find container \"d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3\": container with ID starting with d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3 not found: ID does not exist"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.090898 4838 scope.go:117] "RemoveContainer" containerID="af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.091216 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe"} err="failed to get container status \"af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe\": rpc error: code = NotFound desc = could not find container \"af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe\": container with ID starting with af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe not found: ID does not exist"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.091238 4838 scope.go:117] "RemoveContainer" containerID="8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.091453 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881"} err="failed to get container status \"8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881\": rpc error: code = NotFound desc = could not find container \"8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881\": container with ID starting with 8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881 not found: ID does not exist"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.091471 4838 scope.go:117] "RemoveContainer" containerID="c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.095220 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd"} err="failed to get container status \"c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd\": rpc error: code = NotFound desc = could not find container \"c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd\": container with ID starting with c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd not found: ID does not exist"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.095255 4838 scope.go:117] "RemoveContainer" containerID="d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.097316 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3"} err="failed to get container status \"d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3\": rpc error: code = NotFound desc = could not find container \"d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3\": container with ID starting with d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3 not found: ID does not exist"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.097343 4838 scope.go:117] "RemoveContainer" containerID="af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.097636 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe"} err="failed to get container status \"af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe\": rpc error: code = NotFound desc = could not find container \"af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe\": container with ID starting with af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe not found: ID does not exist"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.097656 4838 scope.go:117] "RemoveContainer" containerID="8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.097936 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881"} err="failed to get container status \"8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881\": rpc error: code = NotFound desc = could not find container \"8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881\": container with ID starting with 8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881 not found: ID does not exist"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.097978 4838 scope.go:117] "RemoveContainer" containerID="c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.098230 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd"} err="failed to get container status \"c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd\": rpc error: code = NotFound desc = could not find container \"c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd\": container with ID starting with c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd not found: ID does not exist"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.098253 4838 scope.go:117] "RemoveContainer" containerID="d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.098455 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3"} err="failed to get container status \"d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3\": rpc error: code = NotFound desc = could not find container \"d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3\": container with ID starting with d355a598e4d1f8d4f00769d3b15fc5bc8a7814d8c6ae051c331b9057274655f3 not found: ID does not exist"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.098480 4838 scope.go:117] "RemoveContainer" containerID="af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.098650 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe"} err="failed to get container status \"af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe\": rpc error: code = NotFound desc = could not find container \"af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe\": container with ID starting with af29677a91e5aaf2520a86b35decb0d3be0a8cb0ac90fa515d71544475dabbbe not found: ID does not exist"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.098670 4838 scope.go:117] "RemoveContainer" containerID="8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.098890 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881"} err="failed to get container status \"8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881\": rpc error: code = NotFound desc = could not find container \"8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881\": container with ID starting with 8983f42d585066f0f41ab8dc5e2f2bb4360e19c9682b5231754c22dcdfe35881 not found: ID does not exist"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.098909 4838 scope.go:117] "RemoveContainer" containerID="c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.099156 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd"} err="failed to get container status \"c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd\": rpc error: code = NotFound desc = could not find container \"c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd\": container with ID starting with c9e20ccf58896c1d8350c42cb2e29d562d1295cc11d34b307e37a5e8889773dd not found: ID does not exist"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.215174 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/44f64b6e-29ed-489c-b9dd-6583f408ded2-log-httpd\") pod \"ceilometer-0\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") " pod="openstack/ceilometer-0"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.215211 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") " pod="openstack/ceilometer-0"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.215238 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-scripts\") pod \"ceilometer-0\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") " pod="openstack/ceilometer-0"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.215257 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") " pod="openstack/ceilometer-0"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.215847 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-config-data\") pod \"ceilometer-0\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") " pod="openstack/ceilometer-0"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.215922 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tk82\" (UniqueName: \"kubernetes.io/projected/44f64b6e-29ed-489c-b9dd-6583f408ded2-kube-api-access-6tk82\") pod \"ceilometer-0\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") " pod="openstack/ceilometer-0"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.215947 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/44f64b6e-29ed-489c-b9dd-6583f408ded2-run-httpd\") pod \"ceilometer-0\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") " pod="openstack/ceilometer-0"
Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.318052 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-config-data\") pod \"ceilometer-0\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") " pod="openstack/ceilometer-0"
\"44f64b6e-29ed-489c-b9dd-6583f408ded2\") " pod="openstack/ceilometer-0" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.318190 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6tk82\" (UniqueName: \"kubernetes.io/projected/44f64b6e-29ed-489c-b9dd-6583f408ded2-kube-api-access-6tk82\") pod \"ceilometer-0\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") " pod="openstack/ceilometer-0" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.318221 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/44f64b6e-29ed-489c-b9dd-6583f408ded2-run-httpd\") pod \"ceilometer-0\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") " pod="openstack/ceilometer-0" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.318262 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/44f64b6e-29ed-489c-b9dd-6583f408ded2-log-httpd\") pod \"ceilometer-0\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") " pod="openstack/ceilometer-0" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.318288 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") " pod="openstack/ceilometer-0" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.318317 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-scripts\") pod \"ceilometer-0\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") " pod="openstack/ceilometer-0" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.318342 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") " pod="openstack/ceilometer-0" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.319582 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/44f64b6e-29ed-489c-b9dd-6583f408ded2-log-httpd\") pod \"ceilometer-0\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") " pod="openstack/ceilometer-0" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.321583 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/44f64b6e-29ed-489c-b9dd-6583f408ded2-run-httpd\") pod \"ceilometer-0\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") " pod="openstack/ceilometer-0" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.326152 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-scripts\") pod \"ceilometer-0\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") " pod="openstack/ceilometer-0" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.326330 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-config-data\") pod \"ceilometer-0\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") " pod="openstack/ceilometer-0" Nov 
28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.326695 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") " pod="openstack/ceilometer-0" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.328643 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") " pod="openstack/ceilometer-0" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.338389 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tk82\" (UniqueName: \"kubernetes.io/projected/44f64b6e-29ed-489c-b9dd-6583f408ded2-kube-api-access-6tk82\") pod \"ceilometer-0\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") " pod="openstack/ceilometer-0" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.358562 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.712311 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-84ph6"] Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.713547 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-84ph6" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.715166 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.716099 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-prjd6" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.718252 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.721418 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-84ph6"] Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.735768 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6thk\" (UniqueName: \"kubernetes.io/projected/61223154-98ac-45e7-af63-714d3c3cb7d7-kube-api-access-k6thk\") pod \"nova-cell0-conductor-db-sync-84ph6\" (UID: \"61223154-98ac-45e7-af63-714d3c3cb7d7\") " pod="openstack/nova-cell0-conductor-db-sync-84ph6" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.735829 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61223154-98ac-45e7-af63-714d3c3cb7d7-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-84ph6\" (UID: \"61223154-98ac-45e7-af63-714d3c3cb7d7\") " pod="openstack/nova-cell0-conductor-db-sync-84ph6" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.735905 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61223154-98ac-45e7-af63-714d3c3cb7d7-scripts\") pod \"nova-cell0-conductor-db-sync-84ph6\" (UID: \"61223154-98ac-45e7-af63-714d3c3cb7d7\") " pod="openstack/nova-cell0-conductor-db-sync-84ph6" Nov 28 
10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.735964 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61223154-98ac-45e7-af63-714d3c3cb7d7-config-data\") pod \"nova-cell0-conductor-db-sync-84ph6\" (UID: \"61223154-98ac-45e7-af63-714d3c3cb7d7\") " pod="openstack/nova-cell0-conductor-db-sync-84ph6" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.812681 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.837983 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61223154-98ac-45e7-af63-714d3c3cb7d7-scripts\") pod \"nova-cell0-conductor-db-sync-84ph6\" (UID: \"61223154-98ac-45e7-af63-714d3c3cb7d7\") " pod="openstack/nova-cell0-conductor-db-sync-84ph6" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.838448 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61223154-98ac-45e7-af63-714d3c3cb7d7-config-data\") pod \"nova-cell0-conductor-db-sync-84ph6\" (UID: \"61223154-98ac-45e7-af63-714d3c3cb7d7\") " pod="openstack/nova-cell0-conductor-db-sync-84ph6" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.838589 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6thk\" (UniqueName: \"kubernetes.io/projected/61223154-98ac-45e7-af63-714d3c3cb7d7-kube-api-access-k6thk\") pod \"nova-cell0-conductor-db-sync-84ph6\" (UID: \"61223154-98ac-45e7-af63-714d3c3cb7d7\") " pod="openstack/nova-cell0-conductor-db-sync-84ph6" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.838633 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61223154-98ac-45e7-af63-714d3c3cb7d7-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-84ph6\" (UID: \"61223154-98ac-45e7-af63-714d3c3cb7d7\") " pod="openstack/nova-cell0-conductor-db-sync-84ph6" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.843528 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61223154-98ac-45e7-af63-714d3c3cb7d7-config-data\") pod \"nova-cell0-conductor-db-sync-84ph6\" (UID: \"61223154-98ac-45e7-af63-714d3c3cb7d7\") " pod="openstack/nova-cell0-conductor-db-sync-84ph6" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.843873 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61223154-98ac-45e7-af63-714d3c3cb7d7-scripts\") pod \"nova-cell0-conductor-db-sync-84ph6\" (UID: \"61223154-98ac-45e7-af63-714d3c3cb7d7\") " pod="openstack/nova-cell0-conductor-db-sync-84ph6" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.844583 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61223154-98ac-45e7-af63-714d3c3cb7d7-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-84ph6\" (UID: \"61223154-98ac-45e7-af63-714d3c3cb7d7\") " pod="openstack/nova-cell0-conductor-db-sync-84ph6" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.857128 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6thk\" (UniqueName: 
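Every payload in these entries follows the klog text format: a severity letter fused with an MMDD date (I1128, E1128, W1128), a wall-clock time with microseconds, the emitting PID, a source file:line, and then either a quoted message with key="value" pairs or free text. A minimal header parser in Python; the field names and the sample variable are illustrative, not part of any official grammar:

import re

# klog text header: <level><MMDD> <HH:MM:SS.ffffff> <pid> <file>:<line>] <payload>
# Written against the entries above; an informal sketch, not an exhaustive parser.
KLOG = re.compile(
    r'(?P<level>[IWEF])(?P<date>\d{4}) (?P<time>\d{2}:\d{2}:\d{2}\.\d{6}) +'
    r'(?P<pid>\d+) (?P<src>[\w./-]+:\d+)\] (?P<payload>.*)'
)

def parse(line: str):
    """Return a dict of header fields for one kubelet log line, or None."""
    m = KLOG.search(line)
    return m.groupdict() if m else None

sample = ('I1128 10:17:59.812681 4838 kubelet.go:2428] '
          '"SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]')
print(parse(sample)["src"])   # -> kubelet.go:2428

Grouping parsed entries by the src field is a quick way to separate the volume reconciler traffic (reconciler_common.go, operation_generator.go) from the pod lifecycle events (kubelet.go, generic.go) interleaved through this section.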
\"kubernetes.io/projected/61223154-98ac-45e7-af63-714d3c3cb7d7-kube-api-access-k6thk\") pod \"nova-cell0-conductor-db-sync-84ph6\" (UID: \"61223154-98ac-45e7-af63-714d3c3cb7d7\") " pod="openstack/nova-cell0-conductor-db-sync-84ph6" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.933569 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"44f64b6e-29ed-489c-b9dd-6583f408ded2","Type":"ContainerStarted","Data":"51ffca44027a606ac209193e008a4527f79fc0d594801c4a000bc0f9b8f4424b"} Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.936784 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"e94e5f12-61ab-40e8-97ce-dc6f3c706583","Type":"ContainerStarted","Data":"f76c3b91e1fed9bdf2fec87c29f0496f89942a9fb9da3862c8900cc9b7d56ac4"} Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.937577 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 28 10:17:59 crc kubenswrapper[4838]: I1128 10:17:59.961068 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.961048453 podStartE2EDuration="3.961048453s" podCreationTimestamp="2025-11-28 10:17:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:17:59.957021635 +0000 UTC m=+1251.655995815" watchObservedRunningTime="2025-11-28 10:17:59.961048453 +0000 UTC m=+1251.660022623" Nov 28 10:18:00 crc kubenswrapper[4838]: I1128 10:18:00.036280 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-84ph6" Nov 28 10:18:00 crc kubenswrapper[4838]: I1128 10:18:00.474794 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-84ph6"] Nov 28 10:18:00 crc kubenswrapper[4838]: W1128 10:18:00.482941 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod61223154_98ac_45e7_af63_714d3c3cb7d7.slice/crio-dd496bc35dbdce52f95cddde6054e78b09a7a25341501a4b2be3a959027f7c80 WatchSource:0}: Error finding container dd496bc35dbdce52f95cddde6054e78b09a7a25341501a4b2be3a959027f7c80: Status 404 returned error can't find the container with id dd496bc35dbdce52f95cddde6054e78b09a7a25341501a4b2be3a959027f7c80 Nov 28 10:18:00 crc kubenswrapper[4838]: I1128 10:18:00.573589 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ea6218f-637c-472d-83b3-1059c0d7e1f7" path="/var/lib/kubelet/pods/4ea6218f-637c-472d-83b3-1059c0d7e1f7/volumes" Nov 28 10:18:00 crc kubenswrapper[4838]: I1128 10:18:00.946448 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-84ph6" event={"ID":"61223154-98ac-45e7-af63-714d3c3cb7d7","Type":"ContainerStarted","Data":"dd496bc35dbdce52f95cddde6054e78b09a7a25341501a4b2be3a959027f7c80"} Nov 28 10:18:00 crc kubenswrapper[4838]: I1128 10:18:00.948351 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"44f64b6e-29ed-489c-b9dd-6583f408ded2","Type":"ContainerStarted","Data":"3db582849dcfe77177c3ee0a81a7741c5a8ce9955cd30db22f85634753b2c5e6"} Nov 28 10:18:01 crc kubenswrapper[4838]: I1128 10:18:01.958589 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"44f64b6e-29ed-489c-b9dd-6583f408ded2","Type":"ContainerStarted","Data":"0d19406fc6a9bdd827c3b90b2efcaabc064dc6a737cf2fee7b807dc060f9752a"} Nov 28 10:18:02 crc kubenswrapper[4838]: I1128 10:18:02.974291 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"44f64b6e-29ed-489c-b9dd-6583f408ded2","Type":"ContainerStarted","Data":"1005b82b3330b4f65be6512bf4f0de8459f9c3896be9e1d4a4f059edc0145171"} Nov 28 10:18:03 crc kubenswrapper[4838]: I1128 10:18:03.984382 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"44f64b6e-29ed-489c-b9dd-6583f408ded2","Type":"ContainerStarted","Data":"190951ff9b912c47b9c422b7d9476a85ee2cf36c57b846b6602250efb7469dc4"} Nov 28 10:18:03 crc kubenswrapper[4838]: I1128 10:18:03.984963 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 10:18:04 crc kubenswrapper[4838]: I1128 10:18:04.006859 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.146835251 podStartE2EDuration="6.006841149s" podCreationTimestamp="2025-11-28 10:17:58 +0000 UTC" firstStartedPulling="2025-11-28 10:17:59.816517109 +0000 UTC m=+1251.515491279" lastFinishedPulling="2025-11-28 10:18:03.676523007 +0000 UTC m=+1255.375497177" observedRunningTime="2025-11-28 10:18:04.003228591 +0000 UTC m=+1255.702202761" watchObservedRunningTime="2025-11-28 10:18:04.006841149 +0000 UTC m=+1255.705815319" Nov 28 10:18:09 crc kubenswrapper[4838]: I1128 10:18:09.223981 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 28 10:18:11 crc kubenswrapper[4838]: I1128 10:18:11.076296 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-84ph6" event={"ID":"61223154-98ac-45e7-af63-714d3c3cb7d7","Type":"ContainerStarted","Data":"52f4d7bae6dd59014073affb4fb7b22afdce4f3fb975f9f5ee9c730f8edadab0"} Nov 28 10:18:11 crc kubenswrapper[4838]: I1128 10:18:11.098441 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-84ph6" podStartSLOduration=1.8439807689999999 podStartE2EDuration="12.098421729s" podCreationTimestamp="2025-11-28 10:17:59 +0000 UTC" firstStartedPulling="2025-11-28 10:18:00.485122918 +0000 UTC m=+1252.184097088" lastFinishedPulling="2025-11-28 10:18:10.739563878 +0000 UTC m=+1262.438538048" observedRunningTime="2025-11-28 10:18:11.095061929 +0000 UTC m=+1262.794036099" watchObservedRunningTime="2025-11-28 10:18:11.098421729 +0000 UTC m=+1262.797395899" Nov 28 10:18:13 crc kubenswrapper[4838]: I1128 10:18:13.693504 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:18:13 crc kubenswrapper[4838]: I1128 10:18:13.694530 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="44f64b6e-29ed-489c-b9dd-6583f408ded2" containerName="ceilometer-central-agent" containerID="cri-o://3db582849dcfe77177c3ee0a81a7741c5a8ce9955cd30db22f85634753b2c5e6" gracePeriod=30 Nov 28 10:18:13 crc kubenswrapper[4838]: I1128 10:18:13.694590 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="44f64b6e-29ed-489c-b9dd-6583f408ded2" containerName="sg-core" containerID="cri-o://1005b82b3330b4f65be6512bf4f0de8459f9c3896be9e1d4a4f059edc0145171" gracePeriod=30 Nov 28 10:18:13 crc kubenswrapper[4838]: I1128 10:18:13.694629 
Nov 28 10:18:13 crc kubenswrapper[4838]: I1128 10:18:13.694674 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="44f64b6e-29ed-489c-b9dd-6583f408ded2" containerName="ceilometer-notification-agent" containerID="cri-o://0d19406fc6a9bdd827c3b90b2efcaabc064dc6a737cf2fee7b807dc060f9752a" gracePeriod=30
Nov 28 10:18:13 crc kubenswrapper[4838]: I1128 10:18:13.704274 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.107069 4838 generic.go:334] "Generic (PLEG): container finished" podID="44f64b6e-29ed-489c-b9dd-6583f408ded2" containerID="190951ff9b912c47b9c422b7d9476a85ee2cf36c57b846b6602250efb7469dc4" exitCode=0
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.107118 4838 generic.go:334] "Generic (PLEG): container finished" podID="44f64b6e-29ed-489c-b9dd-6583f408ded2" containerID="1005b82b3330b4f65be6512bf4f0de8459f9c3896be9e1d4a4f059edc0145171" exitCode=2
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.107146 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"44f64b6e-29ed-489c-b9dd-6583f408ded2","Type":"ContainerDied","Data":"190951ff9b912c47b9c422b7d9476a85ee2cf36c57b846b6602250efb7469dc4"}
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.107188 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"44f64b6e-29ed-489c-b9dd-6583f408ded2","Type":"ContainerDied","Data":"1005b82b3330b4f65be6512bf4f0de8459f9c3896be9e1d4a4f059edc0145171"}
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.766949 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.773275 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/44f64b6e-29ed-489c-b9dd-6583f408ded2-log-httpd\") pod \"44f64b6e-29ed-489c-b9dd-6583f408ded2\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") "
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.773408 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-config-data\") pod \"44f64b6e-29ed-489c-b9dd-6583f408ded2\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") "
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.773452 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6tk82\" (UniqueName: \"kubernetes.io/projected/44f64b6e-29ed-489c-b9dd-6583f408ded2-kube-api-access-6tk82\") pod \"44f64b6e-29ed-489c-b9dd-6583f408ded2\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") "
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.773478 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-sg-core-conf-yaml\") pod \"44f64b6e-29ed-489c-b9dd-6583f408ded2\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") "
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.773553 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-scripts\") pod \"44f64b6e-29ed-489c-b9dd-6583f408ded2\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") "
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.773588 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-combined-ca-bundle\") pod \"44f64b6e-29ed-489c-b9dd-6583f408ded2\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") "
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.773619 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/44f64b6e-29ed-489c-b9dd-6583f408ded2-run-httpd\") pod \"44f64b6e-29ed-489c-b9dd-6583f408ded2\" (UID: \"44f64b6e-29ed-489c-b9dd-6583f408ded2\") "
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.773827 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44f64b6e-29ed-489c-b9dd-6583f408ded2-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "44f64b6e-29ed-489c-b9dd-6583f408ded2" (UID: "44f64b6e-29ed-489c-b9dd-6583f408ded2"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.773991 4838 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/44f64b6e-29ed-489c-b9dd-6583f408ded2-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.774092 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44f64b6e-29ed-489c-b9dd-6583f408ded2-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "44f64b6e-29ed-489c-b9dd-6583f408ded2" (UID: "44f64b6e-29ed-489c-b9dd-6583f408ded2"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.804982 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44f64b6e-29ed-489c-b9dd-6583f408ded2-kube-api-access-6tk82" (OuterVolumeSpecName: "kube-api-access-6tk82") pod "44f64b6e-29ed-489c-b9dd-6583f408ded2" (UID: "44f64b6e-29ed-489c-b9dd-6583f408ded2"). InnerVolumeSpecName "kube-api-access-6tk82". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.805313 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-scripts" (OuterVolumeSpecName: "scripts") pod "44f64b6e-29ed-489c-b9dd-6583f408ded2" (UID: "44f64b6e-29ed-489c-b9dd-6583f408ded2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.849998 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "44f64b6e-29ed-489c-b9dd-6583f408ded2" (UID: "44f64b6e-29ed-489c-b9dd-6583f408ded2"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.875015 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.875219 4838 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/44f64b6e-29ed-489c-b9dd-6583f408ded2-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.875299 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6tk82\" (UniqueName: \"kubernetes.io/projected/44f64b6e-29ed-489c-b9dd-6583f408ded2-kube-api-access-6tk82\") on node \"crc\" DevicePath \"\""
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.875378 4838 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.908933 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "44f64b6e-29ed-489c-b9dd-6583f408ded2" (UID: "44f64b6e-29ed-489c-b9dd-6583f408ded2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.947647 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-config-data" (OuterVolumeSpecName: "config-data") pod "44f64b6e-29ed-489c-b9dd-6583f408ded2" (UID: "44f64b6e-29ed-489c-b9dd-6583f408ded2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.978906 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 10:18:14 crc kubenswrapper[4838]: I1128 10:18:14.978961 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44f64b6e-29ed-489c-b9dd-6583f408ded2-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.119464 4838 generic.go:334] "Generic (PLEG): container finished" podID="44f64b6e-29ed-489c-b9dd-6583f408ded2" containerID="0d19406fc6a9bdd827c3b90b2efcaabc064dc6a737cf2fee7b807dc060f9752a" exitCode=0
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.119805 4838 generic.go:334] "Generic (PLEG): container finished" podID="44f64b6e-29ed-489c-b9dd-6583f408ded2" containerID="3db582849dcfe77177c3ee0a81a7741c5a8ce9955cd30db22f85634753b2c5e6" exitCode=0
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.119595 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.119532 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"44f64b6e-29ed-489c-b9dd-6583f408ded2","Type":"ContainerDied","Data":"0d19406fc6a9bdd827c3b90b2efcaabc064dc6a737cf2fee7b807dc060f9752a"}
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.119891 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"44f64b6e-29ed-489c-b9dd-6583f408ded2","Type":"ContainerDied","Data":"3db582849dcfe77177c3ee0a81a7741c5a8ce9955cd30db22f85634753b2c5e6"}
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.119929 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"44f64b6e-29ed-489c-b9dd-6583f408ded2","Type":"ContainerDied","Data":"51ffca44027a606ac209193e008a4527f79fc0d594801c4a000bc0f9b8f4424b"}
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.119971 4838 scope.go:117] "RemoveContainer" containerID="190951ff9b912c47b9c422b7d9476a85ee2cf36c57b846b6602250efb7469dc4"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.168045 4838 scope.go:117] "RemoveContainer" containerID="1005b82b3330b4f65be6512bf4f0de8459f9c3896be9e1d4a4f059edc0145171"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.194010 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.200306 4838 scope.go:117] "RemoveContainer" containerID="0d19406fc6a9bdd827c3b90b2efcaabc064dc6a737cf2fee7b807dc060f9752a"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.233488 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.244578 4838 scope.go:117] "RemoveContainer" containerID="3db582849dcfe77177c3ee0a81a7741c5a8ce9955cd30db22f85634753b2c5e6"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.246314 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 28 10:18:15 crc kubenswrapper[4838]: E1128 10:18:15.246821 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44f64b6e-29ed-489c-b9dd-6583f408ded2" containerName="sg-core"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.246851 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="44f64b6e-29ed-489c-b9dd-6583f408ded2" containerName="sg-core"
Nov 28 10:18:15 crc kubenswrapper[4838]: E1128 10:18:15.246880 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44f64b6e-29ed-489c-b9dd-6583f408ded2" containerName="proxy-httpd"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.246890 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="44f64b6e-29ed-489c-b9dd-6583f408ded2" containerName="proxy-httpd"
Nov 28 10:18:15 crc kubenswrapper[4838]: E1128 10:18:15.246908 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44f64b6e-29ed-489c-b9dd-6583f408ded2" containerName="ceilometer-notification-agent"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.246916 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="44f64b6e-29ed-489c-b9dd-6583f408ded2" containerName="ceilometer-notification-agent"
Nov 28 10:18:15 crc kubenswrapper[4838]: E1128 10:18:15.246930 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44f64b6e-29ed-489c-b9dd-6583f408ded2" containerName="ceilometer-central-agent"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.246938 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="44f64b6e-29ed-489c-b9dd-6583f408ded2" containerName="ceilometer-central-agent"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.247135 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="44f64b6e-29ed-489c-b9dd-6583f408ded2" containerName="ceilometer-notification-agent"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.247157 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="44f64b6e-29ed-489c-b9dd-6583f408ded2" containerName="ceilometer-central-agent"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.247186 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="44f64b6e-29ed-489c-b9dd-6583f408ded2" containerName="sg-core"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.247205 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="44f64b6e-29ed-489c-b9dd-6583f408ded2" containerName="proxy-httpd"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.249136 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.251921 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.252217 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.256541 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.276075 4838 scope.go:117] "RemoveContainer" containerID="190951ff9b912c47b9c422b7d9476a85ee2cf36c57b846b6602250efb7469dc4"
Nov 28 10:18:15 crc kubenswrapper[4838]: E1128 10:18:15.277524 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"190951ff9b912c47b9c422b7d9476a85ee2cf36c57b846b6602250efb7469dc4\": container with ID starting with 190951ff9b912c47b9c422b7d9476a85ee2cf36c57b846b6602250efb7469dc4 not found: ID does not exist" containerID="190951ff9b912c47b9c422b7d9476a85ee2cf36c57b846b6602250efb7469dc4"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.277561 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"190951ff9b912c47b9c422b7d9476a85ee2cf36c57b846b6602250efb7469dc4"} err="failed to get container status \"190951ff9b912c47b9c422b7d9476a85ee2cf36c57b846b6602250efb7469dc4\": rpc error: code = NotFound desc = could not find container \"190951ff9b912c47b9c422b7d9476a85ee2cf36c57b846b6602250efb7469dc4\": container with ID starting with 190951ff9b912c47b9c422b7d9476a85ee2cf36c57b846b6602250efb7469dc4 not found: ID does not exist"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.277585 4838 scope.go:117] "RemoveContainer" containerID="1005b82b3330b4f65be6512bf4f0de8459f9c3896be9e1d4a4f059edc0145171"
Nov 28 10:18:15 crc kubenswrapper[4838]: E1128 10:18:15.277935 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1005b82b3330b4f65be6512bf4f0de8459f9c3896be9e1d4a4f059edc0145171\": container with ID starting with 1005b82b3330b4f65be6512bf4f0de8459f9c3896be9e1d4a4f059edc0145171 not found: ID does not exist" containerID="1005b82b3330b4f65be6512bf4f0de8459f9c3896be9e1d4a4f059edc0145171"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.277964 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1005b82b3330b4f65be6512bf4f0de8459f9c3896be9e1d4a4f059edc0145171"} err="failed to get container status \"1005b82b3330b4f65be6512bf4f0de8459f9c3896be9e1d4a4f059edc0145171\": rpc error: code = NotFound desc = could not find container \"1005b82b3330b4f65be6512bf4f0de8459f9c3896be9e1d4a4f059edc0145171\": container with ID starting with 1005b82b3330b4f65be6512bf4f0de8459f9c3896be9e1d4a4f059edc0145171 not found: ID does not exist"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.277982 4838 scope.go:117] "RemoveContainer" containerID="0d19406fc6a9bdd827c3b90b2efcaabc064dc6a737cf2fee7b807dc060f9752a"
Nov 28 10:18:15 crc kubenswrapper[4838]: E1128 10:18:15.278221 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d19406fc6a9bdd827c3b90b2efcaabc064dc6a737cf2fee7b807dc060f9752a\": container with ID starting with 0d19406fc6a9bdd827c3b90b2efcaabc064dc6a737cf2fee7b807dc060f9752a not found: ID does not exist" containerID="0d19406fc6a9bdd827c3b90b2efcaabc064dc6a737cf2fee7b807dc060f9752a"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.278245 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d19406fc6a9bdd827c3b90b2efcaabc064dc6a737cf2fee7b807dc060f9752a"} err="failed to get container status \"0d19406fc6a9bdd827c3b90b2efcaabc064dc6a737cf2fee7b807dc060f9752a\": rpc error: code = NotFound desc = could not find container \"0d19406fc6a9bdd827c3b90b2efcaabc064dc6a737cf2fee7b807dc060f9752a\": container with ID starting with 0d19406fc6a9bdd827c3b90b2efcaabc064dc6a737cf2fee7b807dc060f9752a not found: ID does not exist"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.278259 4838 scope.go:117] "RemoveContainer" containerID="3db582849dcfe77177c3ee0a81a7741c5a8ce9955cd30db22f85634753b2c5e6"
Nov 28 10:18:15 crc kubenswrapper[4838]: E1128 10:18:15.279069 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3db582849dcfe77177c3ee0a81a7741c5a8ce9955cd30db22f85634753b2c5e6\": container with ID starting with 3db582849dcfe77177c3ee0a81a7741c5a8ce9955cd30db22f85634753b2c5e6 not found: ID does not exist" containerID="3db582849dcfe77177c3ee0a81a7741c5a8ce9955cd30db22f85634753b2c5e6"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.279097 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3db582849dcfe77177c3ee0a81a7741c5a8ce9955cd30db22f85634753b2c5e6"} err="failed to get container status \"3db582849dcfe77177c3ee0a81a7741c5a8ce9955cd30db22f85634753b2c5e6\": rpc error: code = NotFound desc = could not find container \"3db582849dcfe77177c3ee0a81a7741c5a8ce9955cd30db22f85634753b2c5e6\": container with ID starting with 3db582849dcfe77177c3ee0a81a7741c5a8ce9955cd30db22f85634753b2c5e6 not found: ID does not exist"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.279114 4838 scope.go:117] "RemoveContainer" containerID="190951ff9b912c47b9c422b7d9476a85ee2cf36c57b846b6602250efb7469dc4"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.279342 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"190951ff9b912c47b9c422b7d9476a85ee2cf36c57b846b6602250efb7469dc4"} err="failed to get container status \"190951ff9b912c47b9c422b7d9476a85ee2cf36c57b846b6602250efb7469dc4\": rpc error: code = NotFound desc = could not find container \"190951ff9b912c47b9c422b7d9476a85ee2cf36c57b846b6602250efb7469dc4\": container with ID starting with 190951ff9b912c47b9c422b7d9476a85ee2cf36c57b846b6602250efb7469dc4 not found: ID does not exist"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.279370 4838 scope.go:117] "RemoveContainer" containerID="1005b82b3330b4f65be6512bf4f0de8459f9c3896be9e1d4a4f059edc0145171"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.279566 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1005b82b3330b4f65be6512bf4f0de8459f9c3896be9e1d4a4f059edc0145171"} err="failed to get container status \"1005b82b3330b4f65be6512bf4f0de8459f9c3896be9e1d4a4f059edc0145171\": rpc error: code = NotFound desc = could not find container \"1005b82b3330b4f65be6512bf4f0de8459f9c3896be9e1d4a4f059edc0145171\": container with ID starting with 1005b82b3330b4f65be6512bf4f0de8459f9c3896be9e1d4a4f059edc0145171 not found: ID does not exist"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.279588 4838 scope.go:117] "RemoveContainer" containerID="0d19406fc6a9bdd827c3b90b2efcaabc064dc6a737cf2fee7b807dc060f9752a"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.279808 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d19406fc6a9bdd827c3b90b2efcaabc064dc6a737cf2fee7b807dc060f9752a"} err="failed to get container status \"0d19406fc6a9bdd827c3b90b2efcaabc064dc6a737cf2fee7b807dc060f9752a\": rpc error: code = NotFound desc = could not find container \"0d19406fc6a9bdd827c3b90b2efcaabc064dc6a737cf2fee7b807dc060f9752a\": container with ID starting with 0d19406fc6a9bdd827c3b90b2efcaabc064dc6a737cf2fee7b807dc060f9752a not found: ID does not exist"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.279831 4838 scope.go:117] "RemoveContainer" containerID="3db582849dcfe77177c3ee0a81a7741c5a8ce9955cd30db22f85634753b2c5e6"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.280048 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3db582849dcfe77177c3ee0a81a7741c5a8ce9955cd30db22f85634753b2c5e6"} err="failed to get container status \"3db582849dcfe77177c3ee0a81a7741c5a8ce9955cd30db22f85634753b2c5e6\": rpc error: code = NotFound desc = could not find container \"3db582849dcfe77177c3ee0a81a7741c5a8ce9955cd30db22f85634753b2c5e6\": container with ID starting with 3db582849dcfe77177c3ee0a81a7741c5a8ce9955cd30db22f85634753b2c5e6 not found: ID does not exist"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.393109 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-scripts\") pod \"ceilometer-0\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.393179 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a57f595-4e5d-4ba1-80b9-f088c464d19b-run-httpd\") pod \"ceilometer-0\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.393230 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-config-data\") pod \"ceilometer-0\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.393258 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.393309 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.393380 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmwm5\" (UniqueName: \"kubernetes.io/projected/7a57f595-4e5d-4ba1-80b9-f088c464d19b-kube-api-access-rmwm5\") pod \"ceilometer-0\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.393405 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a57f595-4e5d-4ba1-80b9-f088c464d19b-log-httpd\") pod \"ceilometer-0\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.494855 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmwm5\" (UniqueName: \"kubernetes.io/projected/7a57f595-4e5d-4ba1-80b9-f088c464d19b-kube-api-access-rmwm5\") pod \"ceilometer-0\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.494927 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a57f595-4e5d-4ba1-80b9-f088c464d19b-log-httpd\") pod \"ceilometer-0\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.495008 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-scripts\") pod \"ceilometer-0\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.495062 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a57f595-4e5d-4ba1-80b9-f088c464d19b-run-httpd\") pod \"ceilometer-0\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.495127 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-config-data\") pod \"ceilometer-0\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.495188 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.495255 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.496881 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a57f595-4e5d-4ba1-80b9-f088c464d19b-run-httpd\") pod \"ceilometer-0\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.496912 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a57f595-4e5d-4ba1-80b9-f088c464d19b-log-httpd\") pod \"ceilometer-0\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.500470 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-scripts\") pod \"ceilometer-0\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.500691 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-config-data\") pod \"ceilometer-0\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.501545 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.503546 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.523803 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmwm5\" (UniqueName: \"kubernetes.io/projected/7a57f595-4e5d-4ba1-80b9-f088c464d19b-kube-api-access-rmwm5\") pod \"ceilometer-0\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " pod="openstack/ceilometer-0"
Nov 28 10:18:15 crc kubenswrapper[4838]: I1128 10:18:15.576410 4838 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 10:18:16 crc kubenswrapper[4838]: I1128 10:18:16.124654 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:18:16 crc kubenswrapper[4838]: W1128 10:18:16.133964 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7a57f595_4e5d_4ba1_80b9_f088c464d19b.slice/crio-9ca1f8251e5e15f3c32053ddcd9fd0ef42e248c210e6a0467e9dd0a4d3c994df WatchSource:0}: Error finding container 9ca1f8251e5e15f3c32053ddcd9fd0ef42e248c210e6a0467e9dd0a4d3c994df: Status 404 returned error can't find the container with id 9ca1f8251e5e15f3c32053ddcd9fd0ef42e248c210e6a0467e9dd0a4d3c994df Nov 28 10:18:16 crc kubenswrapper[4838]: I1128 10:18:16.575612 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44f64b6e-29ed-489c-b9dd-6583f408ded2" path="/var/lib/kubelet/pods/44f64b6e-29ed-489c-b9dd-6583f408ded2/volumes" Nov 28 10:18:17 crc kubenswrapper[4838]: I1128 10:18:17.148114 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a57f595-4e5d-4ba1-80b9-f088c464d19b","Type":"ContainerStarted","Data":"cdbde60a6c838ba6fb999d6f8f84bbd37709c136916740ee6799a3c32c048b6e"} Nov 28 10:18:17 crc kubenswrapper[4838]: I1128 10:18:17.148397 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a57f595-4e5d-4ba1-80b9-f088c464d19b","Type":"ContainerStarted","Data":"9ca1f8251e5e15f3c32053ddcd9fd0ef42e248c210e6a0467e9dd0a4d3c994df"} Nov 28 10:18:18 crc kubenswrapper[4838]: I1128 10:18:18.159062 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a57f595-4e5d-4ba1-80b9-f088c464d19b","Type":"ContainerStarted","Data":"aed9a7a101be420df7f969904281c5a8cd1c385df3f5786132979ad3958d38e1"} Nov 28 10:18:19 crc kubenswrapper[4838]: I1128 10:18:19.169546 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a57f595-4e5d-4ba1-80b9-f088c464d19b","Type":"ContainerStarted","Data":"344dfd80495c7c8e55b04f0caab130aed567f1b14b83eab62798042e5d86b78f"} Nov 28 10:18:21 crc kubenswrapper[4838]: I1128 10:18:21.195875 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a57f595-4e5d-4ba1-80b9-f088c464d19b","Type":"ContainerStarted","Data":"8b0b0a562d211dc19cc048c9613d399453345ea08642ae5b2156129635eb26d4"} Nov 28 10:18:21 crc kubenswrapper[4838]: I1128 10:18:21.198341 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 10:18:21 crc kubenswrapper[4838]: I1128 10:18:21.204007 4838 generic.go:334] "Generic (PLEG): container finished" podID="61223154-98ac-45e7-af63-714d3c3cb7d7" containerID="52f4d7bae6dd59014073affb4fb7b22afdce4f3fb975f9f5ee9c730f8edadab0" exitCode=0 Nov 28 10:18:21 crc kubenswrapper[4838]: I1128 10:18:21.204196 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-84ph6" event={"ID":"61223154-98ac-45e7-af63-714d3c3cb7d7","Type":"ContainerDied","Data":"52f4d7bae6dd59014073affb4fb7b22afdce4f3fb975f9f5ee9c730f8edadab0"} Nov 28 10:18:21 crc kubenswrapper[4838]: I1128 10:18:21.252263 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.822752533 podStartE2EDuration="6.252245029s" podCreationTimestamp="2025-11-28 10:18:15 +0000 UTC" firstStartedPulling="2025-11-28 
10:18:16.136839667 +0000 UTC m=+1267.835813847" lastFinishedPulling="2025-11-28 10:18:20.566332173 +0000 UTC m=+1272.265306343" observedRunningTime="2025-11-28 10:18:21.229836545 +0000 UTC m=+1272.928810755" watchObservedRunningTime="2025-11-28 10:18:21.252245029 +0000 UTC m=+1272.951219199" Nov 28 10:18:22 crc kubenswrapper[4838]: I1128 10:18:22.609245 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-84ph6" Nov 28 10:18:22 crc kubenswrapper[4838]: I1128 10:18:22.645658 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6thk\" (UniqueName: \"kubernetes.io/projected/61223154-98ac-45e7-af63-714d3c3cb7d7-kube-api-access-k6thk\") pod \"61223154-98ac-45e7-af63-714d3c3cb7d7\" (UID: \"61223154-98ac-45e7-af63-714d3c3cb7d7\") " Nov 28 10:18:22 crc kubenswrapper[4838]: I1128 10:18:22.645907 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61223154-98ac-45e7-af63-714d3c3cb7d7-scripts\") pod \"61223154-98ac-45e7-af63-714d3c3cb7d7\" (UID: \"61223154-98ac-45e7-af63-714d3c3cb7d7\") " Nov 28 10:18:22 crc kubenswrapper[4838]: I1128 10:18:22.646006 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61223154-98ac-45e7-af63-714d3c3cb7d7-combined-ca-bundle\") pod \"61223154-98ac-45e7-af63-714d3c3cb7d7\" (UID: \"61223154-98ac-45e7-af63-714d3c3cb7d7\") " Nov 28 10:18:22 crc kubenswrapper[4838]: I1128 10:18:22.646077 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61223154-98ac-45e7-af63-714d3c3cb7d7-config-data\") pod \"61223154-98ac-45e7-af63-714d3c3cb7d7\" (UID: \"61223154-98ac-45e7-af63-714d3c3cb7d7\") " Nov 28 10:18:22 crc kubenswrapper[4838]: I1128 10:18:22.653558 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61223154-98ac-45e7-af63-714d3c3cb7d7-scripts" (OuterVolumeSpecName: "scripts") pod "61223154-98ac-45e7-af63-714d3c3cb7d7" (UID: "61223154-98ac-45e7-af63-714d3c3cb7d7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:18:22 crc kubenswrapper[4838]: I1128 10:18:22.653612 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61223154-98ac-45e7-af63-714d3c3cb7d7-kube-api-access-k6thk" (OuterVolumeSpecName: "kube-api-access-k6thk") pod "61223154-98ac-45e7-af63-714d3c3cb7d7" (UID: "61223154-98ac-45e7-af63-714d3c3cb7d7"). InnerVolumeSpecName "kube-api-access-k6thk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:18:22 crc kubenswrapper[4838]: I1128 10:18:22.673359 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61223154-98ac-45e7-af63-714d3c3cb7d7-config-data" (OuterVolumeSpecName: "config-data") pod "61223154-98ac-45e7-af63-714d3c3cb7d7" (UID: "61223154-98ac-45e7-af63-714d3c3cb7d7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:18:22 crc kubenswrapper[4838]: I1128 10:18:22.679262 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61223154-98ac-45e7-af63-714d3c3cb7d7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "61223154-98ac-45e7-af63-714d3c3cb7d7" (UID: "61223154-98ac-45e7-af63-714d3c3cb7d7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:18:22 crc kubenswrapper[4838]: I1128 10:18:22.748494 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61223154-98ac-45e7-af63-714d3c3cb7d7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:22 crc kubenswrapper[4838]: I1128 10:18:22.748542 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61223154-98ac-45e7-af63-714d3c3cb7d7-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:22 crc kubenswrapper[4838]: I1128 10:18:22.748556 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6thk\" (UniqueName: \"kubernetes.io/projected/61223154-98ac-45e7-af63-714d3c3cb7d7-kube-api-access-k6thk\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:22 crc kubenswrapper[4838]: I1128 10:18:22.748571 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61223154-98ac-45e7-af63-714d3c3cb7d7-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:23 crc kubenswrapper[4838]: I1128 10:18:23.233456 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-84ph6" event={"ID":"61223154-98ac-45e7-af63-714d3c3cb7d7","Type":"ContainerDied","Data":"dd496bc35dbdce52f95cddde6054e78b09a7a25341501a4b2be3a959027f7c80"} Nov 28 10:18:23 crc kubenswrapper[4838]: I1128 10:18:23.233493 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-84ph6" Nov 28 10:18:23 crc kubenswrapper[4838]: I1128 10:18:23.233518 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dd496bc35dbdce52f95cddde6054e78b09a7a25341501a4b2be3a959027f7c80" Nov 28 10:18:23 crc kubenswrapper[4838]: I1128 10:18:23.407100 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 10:18:23 crc kubenswrapper[4838]: E1128 10:18:23.407502 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61223154-98ac-45e7-af63-714d3c3cb7d7" containerName="nova-cell0-conductor-db-sync" Nov 28 10:18:23 crc kubenswrapper[4838]: I1128 10:18:23.407525 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="61223154-98ac-45e7-af63-714d3c3cb7d7" containerName="nova-cell0-conductor-db-sync" Nov 28 10:18:23 crc kubenswrapper[4838]: I1128 10:18:23.407769 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="61223154-98ac-45e7-af63-714d3c3cb7d7" containerName="nova-cell0-conductor-db-sync" Nov 28 10:18:23 crc kubenswrapper[4838]: I1128 10:18:23.408327 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 10:18:23 crc kubenswrapper[4838]: I1128 10:18:23.412066 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 10:18:23 crc kubenswrapper[4838]: I1128 10:18:23.414645 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-prjd6" Nov 28 10:18:23 crc kubenswrapper[4838]: I1128 10:18:23.423137 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 10:18:23 crc kubenswrapper[4838]: I1128 10:18:23.460521 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7f6f\" (UniqueName: \"kubernetes.io/projected/71625902-2a8a-4e8b-beb2-faaee7714ed2-kube-api-access-x7f6f\") pod \"nova-cell0-conductor-0\" (UID: \"71625902-2a8a-4e8b-beb2-faaee7714ed2\") " pod="openstack/nova-cell0-conductor-0" Nov 28 10:18:23 crc kubenswrapper[4838]: I1128 10:18:23.460894 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71625902-2a8a-4e8b-beb2-faaee7714ed2-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"71625902-2a8a-4e8b-beb2-faaee7714ed2\") " pod="openstack/nova-cell0-conductor-0" Nov 28 10:18:23 crc kubenswrapper[4838]: I1128 10:18:23.461023 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71625902-2a8a-4e8b-beb2-faaee7714ed2-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"71625902-2a8a-4e8b-beb2-faaee7714ed2\") " pod="openstack/nova-cell0-conductor-0" Nov 28 10:18:23 crc kubenswrapper[4838]: I1128 10:18:23.563030 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71625902-2a8a-4e8b-beb2-faaee7714ed2-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"71625902-2a8a-4e8b-beb2-faaee7714ed2\") " pod="openstack/nova-cell0-conductor-0" Nov 28 10:18:23 crc kubenswrapper[4838]: I1128 10:18:23.563149 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71625902-2a8a-4e8b-beb2-faaee7714ed2-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"71625902-2a8a-4e8b-beb2-faaee7714ed2\") " pod="openstack/nova-cell0-conductor-0" Nov 28 10:18:23 crc kubenswrapper[4838]: I1128 10:18:23.563219 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7f6f\" (UniqueName: \"kubernetes.io/projected/71625902-2a8a-4e8b-beb2-faaee7714ed2-kube-api-access-x7f6f\") pod \"nova-cell0-conductor-0\" (UID: \"71625902-2a8a-4e8b-beb2-faaee7714ed2\") " pod="openstack/nova-cell0-conductor-0" Nov 28 10:18:23 crc kubenswrapper[4838]: I1128 10:18:23.573569 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71625902-2a8a-4e8b-beb2-faaee7714ed2-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"71625902-2a8a-4e8b-beb2-faaee7714ed2\") " pod="openstack/nova-cell0-conductor-0" Nov 28 10:18:23 crc kubenswrapper[4838]: I1128 10:18:23.578642 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71625902-2a8a-4e8b-beb2-faaee7714ed2-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" 
(UID: \"71625902-2a8a-4e8b-beb2-faaee7714ed2\") " pod="openstack/nova-cell0-conductor-0" Nov 28 10:18:23 crc kubenswrapper[4838]: I1128 10:18:23.587174 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7f6f\" (UniqueName: \"kubernetes.io/projected/71625902-2a8a-4e8b-beb2-faaee7714ed2-kube-api-access-x7f6f\") pod \"nova-cell0-conductor-0\" (UID: \"71625902-2a8a-4e8b-beb2-faaee7714ed2\") " pod="openstack/nova-cell0-conductor-0" Nov 28 10:18:23 crc kubenswrapper[4838]: I1128 10:18:23.725863 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 10:18:23 crc kubenswrapper[4838]: I1128 10:18:23.940299 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:18:23 crc kubenswrapper[4838]: I1128 10:18:23.940809 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:18:24 crc kubenswrapper[4838]: I1128 10:18:24.260314 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 10:18:25 crc kubenswrapper[4838]: I1128 10:18:25.259415 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"71625902-2a8a-4e8b-beb2-faaee7714ed2","Type":"ContainerStarted","Data":"5ae0215975ea6834f9ce52f635297277d9ab6d66d0efdb768a60f74c783467e9"} Nov 28 10:18:25 crc kubenswrapper[4838]: I1128 10:18:25.259930 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"71625902-2a8a-4e8b-beb2-faaee7714ed2","Type":"ContainerStarted","Data":"cac4422ec2a04bd0de076cf0d7755af5bc4e7af622c8591a232e46c3346cc00f"} Nov 28 10:18:25 crc kubenswrapper[4838]: I1128 10:18:25.259971 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 28 10:18:25 crc kubenswrapper[4838]: I1128 10:18:25.293830 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.293790539 podStartE2EDuration="2.293790539s" podCreationTimestamp="2025-11-28 10:18:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:18:25.288476726 +0000 UTC m=+1276.987450976" watchObservedRunningTime="2025-11-28 10:18:25.293790539 +0000 UTC m=+1276.992764709" Nov 28 10:18:33 crc kubenswrapper[4838]: I1128 10:18:33.768909 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.291971 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-m8nk4"] Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.294808 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-m8nk4" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.300418 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.300776 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.318361 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-m8nk4"] Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.488319 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.490770 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.492805 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.498108 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.499075 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkwwb\" (UniqueName: \"kubernetes.io/projected/c7190b24-4e06-4c59-9498-33d64a31067d-kube-api-access-mkwwb\") pod \"nova-cell0-cell-mapping-m8nk4\" (UID: \"c7190b24-4e06-4c59-9498-33d64a31067d\") " pod="openstack/nova-cell0-cell-mapping-m8nk4" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.499148 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7190b24-4e06-4c59-9498-33d64a31067d-scripts\") pod \"nova-cell0-cell-mapping-m8nk4\" (UID: \"c7190b24-4e06-4c59-9498-33d64a31067d\") " pod="openstack/nova-cell0-cell-mapping-m8nk4" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.499180 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7190b24-4e06-4c59-9498-33d64a31067d-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-m8nk4\" (UID: \"c7190b24-4e06-4c59-9498-33d64a31067d\") " pod="openstack/nova-cell0-cell-mapping-m8nk4" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.499198 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7190b24-4e06-4c59-9498-33d64a31067d-config-data\") pod \"nova-cell0-cell-mapping-m8nk4\" (UID: \"c7190b24-4e06-4c59-9498-33d64a31067d\") " pod="openstack/nova-cell0-cell-mapping-m8nk4" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.601149 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mmgz\" (UniqueName: \"kubernetes.io/projected/4b719d62-400b-4141-8687-9fb7bf60e64a-kube-api-access-9mmgz\") pod \"nova-scheduler-0\" (UID: \"4b719d62-400b-4141-8687-9fb7bf60e64a\") " pod="openstack/nova-scheduler-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.601240 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkwwb\" (UniqueName: \"kubernetes.io/projected/c7190b24-4e06-4c59-9498-33d64a31067d-kube-api-access-mkwwb\") pod 
\"nova-cell0-cell-mapping-m8nk4\" (UID: \"c7190b24-4e06-4c59-9498-33d64a31067d\") " pod="openstack/nova-cell0-cell-mapping-m8nk4" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.601330 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7190b24-4e06-4c59-9498-33d64a31067d-scripts\") pod \"nova-cell0-cell-mapping-m8nk4\" (UID: \"c7190b24-4e06-4c59-9498-33d64a31067d\") " pod="openstack/nova-cell0-cell-mapping-m8nk4" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.601382 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7190b24-4e06-4c59-9498-33d64a31067d-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-m8nk4\" (UID: \"c7190b24-4e06-4c59-9498-33d64a31067d\") " pod="openstack/nova-cell0-cell-mapping-m8nk4" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.601411 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7190b24-4e06-4c59-9498-33d64a31067d-config-data\") pod \"nova-cell0-cell-mapping-m8nk4\" (UID: \"c7190b24-4e06-4c59-9498-33d64a31067d\") " pod="openstack/nova-cell0-cell-mapping-m8nk4" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.601441 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b719d62-400b-4141-8687-9fb7bf60e64a-config-data\") pod \"nova-scheduler-0\" (UID: \"4b719d62-400b-4141-8687-9fb7bf60e64a\") " pod="openstack/nova-scheduler-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.601488 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b719d62-400b-4141-8687-9fb7bf60e64a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"4b719d62-400b-4141-8687-9fb7bf60e64a\") " pod="openstack/nova-scheduler-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.602509 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.610210 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7190b24-4e06-4c59-9498-33d64a31067d-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-m8nk4\" (UID: \"c7190b24-4e06-4c59-9498-33d64a31067d\") " pod="openstack/nova-cell0-cell-mapping-m8nk4" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.610366 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7190b24-4e06-4c59-9498-33d64a31067d-config-data\") pod \"nova-cell0-cell-mapping-m8nk4\" (UID: \"c7190b24-4e06-4c59-9498-33d64a31067d\") " pod="openstack/nova-cell0-cell-mapping-m8nk4" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.618879 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.618987 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.624888 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7190b24-4e06-4c59-9498-33d64a31067d-scripts\") pod \"nova-cell0-cell-mapping-m8nk4\" (UID: \"c7190b24-4e06-4c59-9498-33d64a31067d\") " pod="openstack/nova-cell0-cell-mapping-m8nk4" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.625151 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.629474 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkwwb\" (UniqueName: \"kubernetes.io/projected/c7190b24-4e06-4c59-9498-33d64a31067d-kube-api-access-mkwwb\") pod \"nova-cell0-cell-mapping-m8nk4\" (UID: \"c7190b24-4e06-4c59-9498-33d64a31067d\") " pod="openstack/nova-cell0-cell-mapping-m8nk4" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.631816 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-m8nk4" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.702862 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b719d62-400b-4141-8687-9fb7bf60e64a-config-data\") pod \"nova-scheduler-0\" (UID: \"4b719d62-400b-4141-8687-9fb7bf60e64a\") " pod="openstack/nova-scheduler-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.702926 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b719d62-400b-4141-8687-9fb7bf60e64a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"4b719d62-400b-4141-8687-9fb7bf60e64a\") " pod="openstack/nova-scheduler-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.703004 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mmgz\" (UniqueName: \"kubernetes.io/projected/4b719d62-400b-4141-8687-9fb7bf60e64a-kube-api-access-9mmgz\") pod \"nova-scheduler-0\" (UID: \"4b719d62-400b-4141-8687-9fb7bf60e64a\") " pod="openstack/nova-scheduler-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.708306 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b719d62-400b-4141-8687-9fb7bf60e64a-config-data\") pod \"nova-scheduler-0\" (UID: \"4b719d62-400b-4141-8687-9fb7bf60e64a\") " pod="openstack/nova-scheduler-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.708811 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b719d62-400b-4141-8687-9fb7bf60e64a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"4b719d62-400b-4141-8687-9fb7bf60e64a\") " pod="openstack/nova-scheduler-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.715287 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.722267 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.725188 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.729113 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mmgz\" (UniqueName: \"kubernetes.io/projected/4b719d62-400b-4141-8687-9fb7bf60e64a-kube-api-access-9mmgz\") pod \"nova-scheduler-0\" (UID: \"4b719d62-400b-4141-8687-9fb7bf60e64a\") " pod="openstack/nova-scheduler-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.738711 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.799535 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.805413 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-th7wp\" (UniqueName: \"kubernetes.io/projected/1e67c380-458a-497c-9230-49b8875934fd-kube-api-access-th7wp\") pod \"nova-api-0\" (UID: \"1e67c380-458a-497c-9230-49b8875934fd\") " pod="openstack/nova-api-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.805466 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e67c380-458a-497c-9230-49b8875934fd-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1e67c380-458a-497c-9230-49b8875934fd\") " pod="openstack/nova-api-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.805520 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e67c380-458a-497c-9230-49b8875934fd-config-data\") pod \"nova-api-0\" (UID: \"1e67c380-458a-497c-9230-49b8875934fd\") " pod="openstack/nova-api-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.805556 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e67c380-458a-497c-9230-49b8875934fd-logs\") pod \"nova-api-0\" (UID: \"1e67c380-458a-497c-9230-49b8875934fd\") " pod="openstack/nova-api-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.810906 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.816263 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.816468 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.829548 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-d47fb"] Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.837996 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-566b5b7845-d47fb" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.848079 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-d47fb"] Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.866089 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.908902 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ae83c476-db75-4cf4-a6b4-8ca846d96c2c\") " pod="openstack/nova-metadata-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.909097 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44c808a6-3890-404e-8393-56982242d012-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"44c808a6-3890-404e-8393-56982242d012\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.909260 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-th7wp\" (UniqueName: \"kubernetes.io/projected/1e67c380-458a-497c-9230-49b8875934fd-kube-api-access-th7wp\") pod \"nova-api-0\" (UID: \"1e67c380-458a-497c-9230-49b8875934fd\") " pod="openstack/nova-api-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.909333 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5w9wt\" (UniqueName: \"kubernetes.io/projected/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-kube-api-access-5w9wt\") pod \"nova-metadata-0\" (UID: \"ae83c476-db75-4cf4-a6b4-8ca846d96c2c\") " pod="openstack/nova-metadata-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.909407 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-config-data\") pod \"nova-metadata-0\" (UID: \"ae83c476-db75-4cf4-a6b4-8ca846d96c2c\") " pod="openstack/nova-metadata-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.909475 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e67c380-458a-497c-9230-49b8875934fd-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1e67c380-458a-497c-9230-49b8875934fd\") " pod="openstack/nova-api-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.909556 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4ll5\" (UniqueName: \"kubernetes.io/projected/44c808a6-3890-404e-8393-56982242d012-kube-api-access-k4ll5\") pod \"nova-cell1-novncproxy-0\" (UID: \"44c808a6-3890-404e-8393-56982242d012\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.909647 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44c808a6-3890-404e-8393-56982242d012-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"44c808a6-3890-404e-8393-56982242d012\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.909751 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e67c380-458a-497c-9230-49b8875934fd-config-data\") pod \"nova-api-0\" (UID: \"1e67c380-458a-497c-9230-49b8875934fd\") " pod="openstack/nova-api-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.909913 4838 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-logs\") pod \"nova-metadata-0\" (UID: \"ae83c476-db75-4cf4-a6b4-8ca846d96c2c\") " pod="openstack/nova-metadata-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.910013 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e67c380-458a-497c-9230-49b8875934fd-logs\") pod \"nova-api-0\" (UID: \"1e67c380-458a-497c-9230-49b8875934fd\") " pod="openstack/nova-api-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.910576 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e67c380-458a-497c-9230-49b8875934fd-logs\") pod \"nova-api-0\" (UID: \"1e67c380-458a-497c-9230-49b8875934fd\") " pod="openstack/nova-api-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.933235 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e67c380-458a-497c-9230-49b8875934fd-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1e67c380-458a-497c-9230-49b8875934fd\") " pod="openstack/nova-api-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.933486 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e67c380-458a-497c-9230-49b8875934fd-config-data\") pod \"nova-api-0\" (UID: \"1e67c380-458a-497c-9230-49b8875934fd\") " pod="openstack/nova-api-0" Nov 28 10:18:34 crc kubenswrapper[4838]: I1128 10:18:34.969208 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-th7wp\" (UniqueName: \"kubernetes.io/projected/1e67c380-458a-497c-9230-49b8875934fd-kube-api-access-th7wp\") pod \"nova-api-0\" (UID: \"1e67c380-458a-497c-9230-49b8875934fd\") " pod="openstack/nova-api-0" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.014934 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5w9wt\" (UniqueName: \"kubernetes.io/projected/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-kube-api-access-5w9wt\") pod \"nova-metadata-0\" (UID: \"ae83c476-db75-4cf4-a6b4-8ca846d96c2c\") " pod="openstack/nova-metadata-0" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.014989 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-config-data\") pod \"nova-metadata-0\" (UID: \"ae83c476-db75-4cf4-a6b4-8ca846d96c2c\") " pod="openstack/nova-metadata-0" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.015032 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9hx9\" (UniqueName: \"kubernetes.io/projected/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-kube-api-access-g9hx9\") pod \"dnsmasq-dns-566b5b7845-d47fb\" (UID: \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\") " pod="openstack/dnsmasq-dns-566b5b7845-d47fb" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.015083 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4ll5\" (UniqueName: \"kubernetes.io/projected/44c808a6-3890-404e-8393-56982242d012-kube-api-access-k4ll5\") pod \"nova-cell1-novncproxy-0\" (UID: \"44c808a6-3890-404e-8393-56982242d012\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 
10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.015127 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44c808a6-3890-404e-8393-56982242d012-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"44c808a6-3890-404e-8393-56982242d012\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.015166 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-ovsdbserver-sb\") pod \"dnsmasq-dns-566b5b7845-d47fb\" (UID: \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\") " pod="openstack/dnsmasq-dns-566b5b7845-d47fb" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.015203 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-logs\") pod \"nova-metadata-0\" (UID: \"ae83c476-db75-4cf4-a6b4-8ca846d96c2c\") " pod="openstack/nova-metadata-0" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.015234 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-dns-svc\") pod \"dnsmasq-dns-566b5b7845-d47fb\" (UID: \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\") " pod="openstack/dnsmasq-dns-566b5b7845-d47fb" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.015265 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-ovsdbserver-nb\") pod \"dnsmasq-dns-566b5b7845-d47fb\" (UID: \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\") " pod="openstack/dnsmasq-dns-566b5b7845-d47fb" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.015335 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-config\") pod \"dnsmasq-dns-566b5b7845-d47fb\" (UID: \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\") " pod="openstack/dnsmasq-dns-566b5b7845-d47fb" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.015358 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ae83c476-db75-4cf4-a6b4-8ca846d96c2c\") " pod="openstack/nova-metadata-0" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.015379 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44c808a6-3890-404e-8393-56982242d012-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"44c808a6-3890-404e-8393-56982242d012\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.020257 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-logs\") pod \"nova-metadata-0\" (UID: \"ae83c476-db75-4cf4-a6b4-8ca846d96c2c\") " pod="openstack/nova-metadata-0" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.020885 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-config-data\") pod \"nova-metadata-0\" (UID: \"ae83c476-db75-4cf4-a6b4-8ca846d96c2c\") " pod="openstack/nova-metadata-0" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.024327 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44c808a6-3890-404e-8393-56982242d012-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"44c808a6-3890-404e-8393-56982242d012\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.041278 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44c808a6-3890-404e-8393-56982242d012-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"44c808a6-3890-404e-8393-56982242d012\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.049116 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4ll5\" (UniqueName: \"kubernetes.io/projected/44c808a6-3890-404e-8393-56982242d012-kube-api-access-k4ll5\") pod \"nova-cell1-novncproxy-0\" (UID: \"44c808a6-3890-404e-8393-56982242d012\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.055216 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5w9wt\" (UniqueName: \"kubernetes.io/projected/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-kube-api-access-5w9wt\") pod \"nova-metadata-0\" (UID: \"ae83c476-db75-4cf4-a6b4-8ca846d96c2c\") " pod="openstack/nova-metadata-0" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.085887 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ae83c476-db75-4cf4-a6b4-8ca846d96c2c\") " pod="openstack/nova-metadata-0" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.118615 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-config\") pod \"dnsmasq-dns-566b5b7845-d47fb\" (UID: \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\") " pod="openstack/dnsmasq-dns-566b5b7845-d47fb" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.118702 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9hx9\" (UniqueName: \"kubernetes.io/projected/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-kube-api-access-g9hx9\") pod \"dnsmasq-dns-566b5b7845-d47fb\" (UID: \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\") " pod="openstack/dnsmasq-dns-566b5b7845-d47fb" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.118782 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-ovsdbserver-sb\") pod \"dnsmasq-dns-566b5b7845-d47fb\" (UID: \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\") " pod="openstack/dnsmasq-dns-566b5b7845-d47fb" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.118808 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-dns-svc\") pod \"dnsmasq-dns-566b5b7845-d47fb\" (UID: \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\") " 
pod="openstack/dnsmasq-dns-566b5b7845-d47fb" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.118834 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-ovsdbserver-nb\") pod \"dnsmasq-dns-566b5b7845-d47fb\" (UID: \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\") " pod="openstack/dnsmasq-dns-566b5b7845-d47fb" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.119690 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-ovsdbserver-nb\") pod \"dnsmasq-dns-566b5b7845-d47fb\" (UID: \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\") " pod="openstack/dnsmasq-dns-566b5b7845-d47fb" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.119729 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-config\") pod \"dnsmasq-dns-566b5b7845-d47fb\" (UID: \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\") " pod="openstack/dnsmasq-dns-566b5b7845-d47fb" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.120032 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-ovsdbserver-sb\") pod \"dnsmasq-dns-566b5b7845-d47fb\" (UID: \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\") " pod="openstack/dnsmasq-dns-566b5b7845-d47fb" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.120274 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-dns-svc\") pod \"dnsmasq-dns-566b5b7845-d47fb\" (UID: \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\") " pod="openstack/dnsmasq-dns-566b5b7845-d47fb" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.132021 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.141417 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9hx9\" (UniqueName: \"kubernetes.io/projected/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-kube-api-access-g9hx9\") pod \"dnsmasq-dns-566b5b7845-d47fb\" (UID: \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\") " pod="openstack/dnsmasq-dns-566b5b7845-d47fb" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.170041 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.180994 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.191606 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-566b5b7845-d47fb" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.354550 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-m8nk4"] Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.432614 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-m8nk4" event={"ID":"c7190b24-4e06-4c59-9498-33d64a31067d","Type":"ContainerStarted","Data":"391a9bb8e0bcdb4ec5ea4e2fcbb73df8f2969f2cc799f5b88833977c2b43b804"} Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.513772 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.570176 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-p7qxj"] Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.571867 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-p7qxj" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.574124 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.574365 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.590827 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-p7qxj"] Nov 28 10:18:35 crc kubenswrapper[4838]: W1128 10:18:35.689804 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e67c380_458a_497c_9230_49b8875934fd.slice/crio-9594f5c7ae6ee5f78a059c7c50e5e19906f8d835395ae5363fc54456b1bd7498 WatchSource:0}: Error finding container 9594f5c7ae6ee5f78a059c7c50e5e19906f8d835395ae5363fc54456b1bd7498: Status 404 returned error can't find the container with id 9594f5c7ae6ee5f78a059c7c50e5e19906f8d835395ae5363fc54456b1bd7498 Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.690812 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.732335 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a6d8b81-2f74-472c-b289-f08fe548ec2f-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-p7qxj\" (UID: \"2a6d8b81-2f74-472c-b289-f08fe548ec2f\") " pod="openstack/nova-cell1-conductor-db-sync-p7qxj" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.732458 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a6d8b81-2f74-472c-b289-f08fe548ec2f-config-data\") pod \"nova-cell1-conductor-db-sync-p7qxj\" (UID: \"2a6d8b81-2f74-472c-b289-f08fe548ec2f\") " pod="openstack/nova-cell1-conductor-db-sync-p7qxj" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.732661 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a6d8b81-2f74-472c-b289-f08fe548ec2f-scripts\") pod \"nova-cell1-conductor-db-sync-p7qxj\" (UID: \"2a6d8b81-2f74-472c-b289-f08fe548ec2f\") " pod="openstack/nova-cell1-conductor-db-sync-p7qxj" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 
10:18:35.732705 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dknw8\" (UniqueName: \"kubernetes.io/projected/2a6d8b81-2f74-472c-b289-f08fe548ec2f-kube-api-access-dknw8\") pod \"nova-cell1-conductor-db-sync-p7qxj\" (UID: \"2a6d8b81-2f74-472c-b289-f08fe548ec2f\") " pod="openstack/nova-cell1-conductor-db-sync-p7qxj" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.786193 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.834162 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a6d8b81-2f74-472c-b289-f08fe548ec2f-scripts\") pod \"nova-cell1-conductor-db-sync-p7qxj\" (UID: \"2a6d8b81-2f74-472c-b289-f08fe548ec2f\") " pod="openstack/nova-cell1-conductor-db-sync-p7qxj" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.834210 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dknw8\" (UniqueName: \"kubernetes.io/projected/2a6d8b81-2f74-472c-b289-f08fe548ec2f-kube-api-access-dknw8\") pod \"nova-cell1-conductor-db-sync-p7qxj\" (UID: \"2a6d8b81-2f74-472c-b289-f08fe548ec2f\") " pod="openstack/nova-cell1-conductor-db-sync-p7qxj" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.834326 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a6d8b81-2f74-472c-b289-f08fe548ec2f-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-p7qxj\" (UID: \"2a6d8b81-2f74-472c-b289-f08fe548ec2f\") " pod="openstack/nova-cell1-conductor-db-sync-p7qxj" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.834356 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a6d8b81-2f74-472c-b289-f08fe548ec2f-config-data\") pod \"nova-cell1-conductor-db-sync-p7qxj\" (UID: \"2a6d8b81-2f74-472c-b289-f08fe548ec2f\") " pod="openstack/nova-cell1-conductor-db-sync-p7qxj" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.839104 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a6d8b81-2f74-472c-b289-f08fe548ec2f-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-p7qxj\" (UID: \"2a6d8b81-2f74-472c-b289-f08fe548ec2f\") " pod="openstack/nova-cell1-conductor-db-sync-p7qxj" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.839279 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a6d8b81-2f74-472c-b289-f08fe548ec2f-scripts\") pod \"nova-cell1-conductor-db-sync-p7qxj\" (UID: \"2a6d8b81-2f74-472c-b289-f08fe548ec2f\") " pod="openstack/nova-cell1-conductor-db-sync-p7qxj" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.840443 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a6d8b81-2f74-472c-b289-f08fe548ec2f-config-data\") pod \"nova-cell1-conductor-db-sync-p7qxj\" (UID: \"2a6d8b81-2f74-472c-b289-f08fe548ec2f\") " pod="openstack/nova-cell1-conductor-db-sync-p7qxj" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.860649 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dknw8\" (UniqueName: 
\"kubernetes.io/projected/2a6d8b81-2f74-472c-b289-f08fe548ec2f-kube-api-access-dknw8\") pod \"nova-cell1-conductor-db-sync-p7qxj\" (UID: \"2a6d8b81-2f74-472c-b289-f08fe548ec2f\") " pod="openstack/nova-cell1-conductor-db-sync-p7qxj" Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.873608 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.881703 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-d47fb"] Nov 28 10:18:35 crc kubenswrapper[4838]: W1128 10:18:35.884756 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6b7969ed_0cb5_4696_95e0_3cd08d3fef1a.slice/crio-a27cbbb0ce6ef4635d6cdb3a76ab27c2ed631e1ed5db8655b58859d665f96756 WatchSource:0}: Error finding container a27cbbb0ce6ef4635d6cdb3a76ab27c2ed631e1ed5db8655b58859d665f96756: Status 404 returned error can't find the container with id a27cbbb0ce6ef4635d6cdb3a76ab27c2ed631e1ed5db8655b58859d665f96756 Nov 28 10:18:35 crc kubenswrapper[4838]: I1128 10:18:35.904128 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-p7qxj" Nov 28 10:18:36 crc kubenswrapper[4838]: I1128 10:18:36.367624 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-p7qxj"] Nov 28 10:18:36 crc kubenswrapper[4838]: I1128 10:18:36.443938 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-m8nk4" event={"ID":"c7190b24-4e06-4c59-9498-33d64a31067d","Type":"ContainerStarted","Data":"fac63343239611b1603ce78b83b375c44ea11140c340c65753db0a1c491d5f48"} Nov 28 10:18:36 crc kubenswrapper[4838]: I1128 10:18:36.446955 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"44c808a6-3890-404e-8393-56982242d012","Type":"ContainerStarted","Data":"09e9da87e0853b6acfb709e507461964cd4b6af7a4b9d8867c3ed1fae981ed50"} Nov 28 10:18:36 crc kubenswrapper[4838]: I1128 10:18:36.448553 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4b719d62-400b-4141-8687-9fb7bf60e64a","Type":"ContainerStarted","Data":"06d5e6d93074920aeb753ed71dc9d568420cc0683107527c23d982170adb4136"} Nov 28 10:18:36 crc kubenswrapper[4838]: I1128 10:18:36.460626 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-p7qxj" event={"ID":"2a6d8b81-2f74-472c-b289-f08fe548ec2f","Type":"ContainerStarted","Data":"203951f17f0c82bd0b65c20aaef45b5b38e30d79620b510afe73fffbafa8336e"} Nov 28 10:18:36 crc kubenswrapper[4838]: I1128 10:18:36.464610 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-m8nk4" podStartSLOduration=2.464592987 podStartE2EDuration="2.464592987s" podCreationTimestamp="2025-11-28 10:18:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:18:36.460490226 +0000 UTC m=+1288.159464396" watchObservedRunningTime="2025-11-28 10:18:36.464592987 +0000 UTC m=+1288.163567157" Nov 28 10:18:36 crc kubenswrapper[4838]: I1128 10:18:36.469474 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"1e67c380-458a-497c-9230-49b8875934fd","Type":"ContainerStarted","Data":"9594f5c7ae6ee5f78a059c7c50e5e19906f8d835395ae5363fc54456b1bd7498"} Nov 28 10:18:36 crc kubenswrapper[4838]: I1128 10:18:36.472658 4838 generic.go:334] "Generic (PLEG): container finished" podID="6b7969ed-0cb5-4696-95e0-3cd08d3fef1a" containerID="d1d95500c3c42b167a4676e3bb8cd2b37319fc347148b90bd7805f680c08efec" exitCode=0 Nov 28 10:18:36 crc kubenswrapper[4838]: I1128 10:18:36.473277 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-566b5b7845-d47fb" event={"ID":"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a","Type":"ContainerDied","Data":"d1d95500c3c42b167a4676e3bb8cd2b37319fc347148b90bd7805f680c08efec"} Nov 28 10:18:36 crc kubenswrapper[4838]: I1128 10:18:36.473306 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-566b5b7845-d47fb" event={"ID":"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a","Type":"ContainerStarted","Data":"a27cbbb0ce6ef4635d6cdb3a76ab27c2ed631e1ed5db8655b58859d665f96756"} Nov 28 10:18:36 crc kubenswrapper[4838]: I1128 10:18:36.478309 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ae83c476-db75-4cf4-a6b4-8ca846d96c2c","Type":"ContainerStarted","Data":"b2f8632a7596a31da48263bc4285472ae8c9733445dd164276025f1a29782293"} Nov 28 10:18:37 crc kubenswrapper[4838]: I1128 10:18:37.494877 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-p7qxj" event={"ID":"2a6d8b81-2f74-472c-b289-f08fe548ec2f","Type":"ContainerStarted","Data":"0a4317f4ad9fe07c39b10f20cae3421c50a515a76bf5fc88db86705e2bdf6788"} Nov 28 10:18:37 crc kubenswrapper[4838]: I1128 10:18:37.507139 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-566b5b7845-d47fb" event={"ID":"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a","Type":"ContainerStarted","Data":"59326f45508a6ad9a3c8b44fafadd82350df901c91732d8795919b376058b40a"} Nov 28 10:18:37 crc kubenswrapper[4838]: I1128 10:18:37.507334 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-566b5b7845-d47fb" Nov 28 10:18:37 crc kubenswrapper[4838]: I1128 10:18:37.525750 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-p7qxj" podStartSLOduration=2.525729525 podStartE2EDuration="2.525729525s" podCreationTimestamp="2025-11-28 10:18:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:18:37.51220724 +0000 UTC m=+1289.211181430" watchObservedRunningTime="2025-11-28 10:18:37.525729525 +0000 UTC m=+1289.224703695" Nov 28 10:18:37 crc kubenswrapper[4838]: I1128 10:18:37.529641 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-566b5b7845-d47fb" podStartSLOduration=3.529618999 podStartE2EDuration="3.529618999s" podCreationTimestamp="2025-11-28 10:18:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:18:37.528182811 +0000 UTC m=+1289.227156971" watchObservedRunningTime="2025-11-28 10:18:37.529618999 +0000 UTC m=+1289.228593169" Nov 28 10:18:38 crc kubenswrapper[4838]: I1128 10:18:38.490174 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 10:18:38 crc kubenswrapper[4838]: I1128 10:18:38.503918 4838 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 10:18:39 crc kubenswrapper[4838]: I1128 10:18:39.539647 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ae83c476-db75-4cf4-a6b4-8ca846d96c2c","Type":"ContainerStarted","Data":"6665b425259ed6c60fcfe8134f4c84ca73b0616ee9379c4447bf0bb59766f911"} Nov 28 10:18:39 crc kubenswrapper[4838]: I1128 10:18:39.540252 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ae83c476-db75-4cf4-a6b4-8ca846d96c2c","Type":"ContainerStarted","Data":"5131603fd5d12b352d9323ff61a63574b3fb2788c9415b18b1756696a96b1373"} Nov 28 10:18:39 crc kubenswrapper[4838]: I1128 10:18:39.539923 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="ae83c476-db75-4cf4-a6b4-8ca846d96c2c" containerName="nova-metadata-log" containerID="cri-o://5131603fd5d12b352d9323ff61a63574b3fb2788c9415b18b1756696a96b1373" gracePeriod=30 Nov 28 10:18:39 crc kubenswrapper[4838]: I1128 10:18:39.540346 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="ae83c476-db75-4cf4-a6b4-8ca846d96c2c" containerName="nova-metadata-metadata" containerID="cri-o://6665b425259ed6c60fcfe8134f4c84ca73b0616ee9379c4447bf0bb59766f911" gracePeriod=30 Nov 28 10:18:39 crc kubenswrapper[4838]: I1128 10:18:39.560421 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"44c808a6-3890-404e-8393-56982242d012","Type":"ContainerStarted","Data":"d30c169cc7a921e9480fae2d04340e41cef7b21788e7d827a7625c95699709f3"} Nov 28 10:18:39 crc kubenswrapper[4838]: I1128 10:18:39.560572 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="44c808a6-3890-404e-8393-56982242d012" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://d30c169cc7a921e9480fae2d04340e41cef7b21788e7d827a7625c95699709f3" gracePeriod=30 Nov 28 10:18:39 crc kubenswrapper[4838]: I1128 10:18:39.566765 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4b719d62-400b-4141-8687-9fb7bf60e64a","Type":"ContainerStarted","Data":"e922688f8823b2cc6bacf2a12c0bedea1a62e7155a3f56fa3d534e9b8165ba3e"} Nov 28 10:18:39 crc kubenswrapper[4838]: I1128 10:18:39.569264 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.620742504 podStartE2EDuration="5.569254448s" podCreationTimestamp="2025-11-28 10:18:34 +0000 UTC" firstStartedPulling="2025-11-28 10:18:35.866000504 +0000 UTC m=+1287.564974674" lastFinishedPulling="2025-11-28 10:18:38.814512448 +0000 UTC m=+1290.513486618" observedRunningTime="2025-11-28 10:18:39.566221516 +0000 UTC m=+1291.265195686" watchObservedRunningTime="2025-11-28 10:18:39.569254448 +0000 UTC m=+1291.268228618" Nov 28 10:18:39 crc kubenswrapper[4838]: I1128 10:18:39.581526 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1e67c380-458a-497c-9230-49b8875934fd","Type":"ContainerStarted","Data":"822d5243ce8b193c4fb551e5e472d329d41f24450f9d6f270a6eda16010197d6"} Nov 28 10:18:39 crc kubenswrapper[4838]: I1128 10:18:39.581574 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"1e67c380-458a-497c-9230-49b8875934fd","Type":"ContainerStarted","Data":"e020355e0736927bc17c9faa3ba0f126d89347c5f4ed51b81e584db36042fa25"} Nov 28 10:18:39 crc kubenswrapper[4838]: I1128 10:18:39.590749 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.568709182 podStartE2EDuration="5.590734407s" podCreationTimestamp="2025-11-28 10:18:34 +0000 UTC" firstStartedPulling="2025-11-28 10:18:35.781876357 +0000 UTC m=+1287.480850527" lastFinishedPulling="2025-11-28 10:18:38.803901582 +0000 UTC m=+1290.502875752" observedRunningTime="2025-11-28 10:18:39.585322751 +0000 UTC m=+1291.284296931" watchObservedRunningTime="2025-11-28 10:18:39.590734407 +0000 UTC m=+1291.289708577" Nov 28 10:18:39 crc kubenswrapper[4838]: I1128 10:18:39.632107 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.36525932 podStartE2EDuration="5.632093082s" podCreationTimestamp="2025-11-28 10:18:34 +0000 UTC" firstStartedPulling="2025-11-28 10:18:35.525299503 +0000 UTC m=+1287.224273673" lastFinishedPulling="2025-11-28 10:18:38.792133265 +0000 UTC m=+1290.491107435" observedRunningTime="2025-11-28 10:18:39.608213748 +0000 UTC m=+1291.307187918" watchObservedRunningTime="2025-11-28 10:18:39.632093082 +0000 UTC m=+1291.331067252" Nov 28 10:18:39 crc kubenswrapper[4838]: I1128 10:18:39.632568 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.520567375 podStartE2EDuration="5.632563294s" podCreationTimestamp="2025-11-28 10:18:34 +0000 UTC" firstStartedPulling="2025-11-28 10:18:35.693468155 +0000 UTC m=+1287.392442325" lastFinishedPulling="2025-11-28 10:18:38.805464074 +0000 UTC m=+1290.504438244" observedRunningTime="2025-11-28 10:18:39.629220915 +0000 UTC m=+1291.328195085" watchObservedRunningTime="2025-11-28 10:18:39.632563294 +0000 UTC m=+1291.331537464" Nov 28 10:18:39 crc kubenswrapper[4838]: I1128 10:18:39.867736 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.171389 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.181619 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.181692 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.425260 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.545479 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-config-data\") pod \"ae83c476-db75-4cf4-a6b4-8ca846d96c2c\" (UID: \"ae83c476-db75-4cf4-a6b4-8ca846d96c2c\") " Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.545581 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-combined-ca-bundle\") pod \"ae83c476-db75-4cf4-a6b4-8ca846d96c2c\" (UID: \"ae83c476-db75-4cf4-a6b4-8ca846d96c2c\") " Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.545624 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5w9wt\" (UniqueName: \"kubernetes.io/projected/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-kube-api-access-5w9wt\") pod \"ae83c476-db75-4cf4-a6b4-8ca846d96c2c\" (UID: \"ae83c476-db75-4cf4-a6b4-8ca846d96c2c\") " Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.545678 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-logs\") pod \"ae83c476-db75-4cf4-a6b4-8ca846d96c2c\" (UID: \"ae83c476-db75-4cf4-a6b4-8ca846d96c2c\") " Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.546490 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-logs" (OuterVolumeSpecName: "logs") pod "ae83c476-db75-4cf4-a6b4-8ca846d96c2c" (UID: "ae83c476-db75-4cf4-a6b4-8ca846d96c2c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.551883 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-kube-api-access-5w9wt" (OuterVolumeSpecName: "kube-api-access-5w9wt") pod "ae83c476-db75-4cf4-a6b4-8ca846d96c2c" (UID: "ae83c476-db75-4cf4-a6b4-8ca846d96c2c"). InnerVolumeSpecName "kube-api-access-5w9wt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.577826 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-config-data" (OuterVolumeSpecName: "config-data") pod "ae83c476-db75-4cf4-a6b4-8ca846d96c2c" (UID: "ae83c476-db75-4cf4-a6b4-8ca846d96c2c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.597672 4838 generic.go:334] "Generic (PLEG): container finished" podID="ae83c476-db75-4cf4-a6b4-8ca846d96c2c" containerID="6665b425259ed6c60fcfe8134f4c84ca73b0616ee9379c4447bf0bb59766f911" exitCode=0 Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.597709 4838 generic.go:334] "Generic (PLEG): container finished" podID="ae83c476-db75-4cf4-a6b4-8ca846d96c2c" containerID="5131603fd5d12b352d9323ff61a63574b3fb2788c9415b18b1756696a96b1373" exitCode=143 Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.597950 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ae83c476-db75-4cf4-a6b4-8ca846d96c2c","Type":"ContainerDied","Data":"6665b425259ed6c60fcfe8134f4c84ca73b0616ee9379c4447bf0bb59766f911"} Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.597985 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ae83c476-db75-4cf4-a6b4-8ca846d96c2c","Type":"ContainerDied","Data":"5131603fd5d12b352d9323ff61a63574b3fb2788c9415b18b1756696a96b1373"} Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.597999 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ae83c476-db75-4cf4-a6b4-8ca846d96c2c","Type":"ContainerDied","Data":"b2f8632a7596a31da48263bc4285472ae8c9733445dd164276025f1a29782293"} Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.598018 4838 scope.go:117] "RemoveContainer" containerID="6665b425259ed6c60fcfe8134f4c84ca73b0616ee9379c4447bf0bb59766f911" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.598435 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.623454 4838 scope.go:117] "RemoveContainer" containerID="5131603fd5d12b352d9323ff61a63574b3fb2788c9415b18b1756696a96b1373" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.629762 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ae83c476-db75-4cf4-a6b4-8ca846d96c2c" (UID: "ae83c476-db75-4cf4-a6b4-8ca846d96c2c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.639489 4838 scope.go:117] "RemoveContainer" containerID="6665b425259ed6c60fcfe8134f4c84ca73b0616ee9379c4447bf0bb59766f911" Nov 28 10:18:40 crc kubenswrapper[4838]: E1128 10:18:40.639991 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6665b425259ed6c60fcfe8134f4c84ca73b0616ee9379c4447bf0bb59766f911\": container with ID starting with 6665b425259ed6c60fcfe8134f4c84ca73b0616ee9379c4447bf0bb59766f911 not found: ID does not exist" containerID="6665b425259ed6c60fcfe8134f4c84ca73b0616ee9379c4447bf0bb59766f911" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.640026 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6665b425259ed6c60fcfe8134f4c84ca73b0616ee9379c4447bf0bb59766f911"} err="failed to get container status \"6665b425259ed6c60fcfe8134f4c84ca73b0616ee9379c4447bf0bb59766f911\": rpc error: code = NotFound desc = could not find container \"6665b425259ed6c60fcfe8134f4c84ca73b0616ee9379c4447bf0bb59766f911\": container with ID starting with 6665b425259ed6c60fcfe8134f4c84ca73b0616ee9379c4447bf0bb59766f911 not found: ID does not exist" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.640051 4838 scope.go:117] "RemoveContainer" containerID="5131603fd5d12b352d9323ff61a63574b3fb2788c9415b18b1756696a96b1373" Nov 28 10:18:40 crc kubenswrapper[4838]: E1128 10:18:40.640401 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5131603fd5d12b352d9323ff61a63574b3fb2788c9415b18b1756696a96b1373\": container with ID starting with 5131603fd5d12b352d9323ff61a63574b3fb2788c9415b18b1756696a96b1373 not found: ID does not exist" containerID="5131603fd5d12b352d9323ff61a63574b3fb2788c9415b18b1756696a96b1373" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.640462 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5131603fd5d12b352d9323ff61a63574b3fb2788c9415b18b1756696a96b1373"} err="failed to get container status \"5131603fd5d12b352d9323ff61a63574b3fb2788c9415b18b1756696a96b1373\": rpc error: code = NotFound desc = could not find container \"5131603fd5d12b352d9323ff61a63574b3fb2788c9415b18b1756696a96b1373\": container with ID starting with 5131603fd5d12b352d9323ff61a63574b3fb2788c9415b18b1756696a96b1373 not found: ID does not exist" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.640500 4838 scope.go:117] "RemoveContainer" containerID="6665b425259ed6c60fcfe8134f4c84ca73b0616ee9379c4447bf0bb59766f911" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.640979 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6665b425259ed6c60fcfe8134f4c84ca73b0616ee9379c4447bf0bb59766f911"} err="failed to get container status \"6665b425259ed6c60fcfe8134f4c84ca73b0616ee9379c4447bf0bb59766f911\": rpc error: code = NotFound desc = could not find container \"6665b425259ed6c60fcfe8134f4c84ca73b0616ee9379c4447bf0bb59766f911\": container with ID starting with 6665b425259ed6c60fcfe8134f4c84ca73b0616ee9379c4447bf0bb59766f911 not found: ID does not exist" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.641017 4838 scope.go:117] "RemoveContainer" containerID="5131603fd5d12b352d9323ff61a63574b3fb2788c9415b18b1756696a96b1373" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.641320 4838 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5131603fd5d12b352d9323ff61a63574b3fb2788c9415b18b1756696a96b1373"} err="failed to get container status \"5131603fd5d12b352d9323ff61a63574b3fb2788c9415b18b1756696a96b1373\": rpc error: code = NotFound desc = could not find container \"5131603fd5d12b352d9323ff61a63574b3fb2788c9415b18b1756696a96b1373\": container with ID starting with 5131603fd5d12b352d9323ff61a63574b3fb2788c9415b18b1756696a96b1373 not found: ID does not exist" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.648018 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.648058 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.648082 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5w9wt\" (UniqueName: \"kubernetes.io/projected/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-kube-api-access-5w9wt\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.648101 4838 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae83c476-db75-4cf4-a6b4-8ca846d96c2c-logs\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.938477 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.954032 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.963393 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 10:18:40 crc kubenswrapper[4838]: E1128 10:18:40.963913 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae83c476-db75-4cf4-a6b4-8ca846d96c2c" containerName="nova-metadata-log" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.963935 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae83c476-db75-4cf4-a6b4-8ca846d96c2c" containerName="nova-metadata-log" Nov 28 10:18:40 crc kubenswrapper[4838]: E1128 10:18:40.963975 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae83c476-db75-4cf4-a6b4-8ca846d96c2c" containerName="nova-metadata-metadata" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.963984 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae83c476-db75-4cf4-a6b4-8ca846d96c2c" containerName="nova-metadata-metadata" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.964201 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae83c476-db75-4cf4-a6b4-8ca846d96c2c" containerName="nova-metadata-log" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.964233 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae83c476-db75-4cf4-a6b4-8ca846d96c2c" containerName="nova-metadata-metadata" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.965411 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.967651 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.967862 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 28 10:18:40 crc kubenswrapper[4838]: I1128 10:18:40.995289 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 10:18:41 crc kubenswrapper[4838]: I1128 10:18:41.057138 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/47b8d231-2896-4d59-8d9d-c070770ced98-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"47b8d231-2896-4d59-8d9d-c070770ced98\") " pod="openstack/nova-metadata-0" Nov 28 10:18:41 crc kubenswrapper[4838]: I1128 10:18:41.057314 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47b8d231-2896-4d59-8d9d-c070770ced98-logs\") pod \"nova-metadata-0\" (UID: \"47b8d231-2896-4d59-8d9d-c070770ced98\") " pod="openstack/nova-metadata-0" Nov 28 10:18:41 crc kubenswrapper[4838]: I1128 10:18:41.057375 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47b8d231-2896-4d59-8d9d-c070770ced98-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"47b8d231-2896-4d59-8d9d-c070770ced98\") " pod="openstack/nova-metadata-0" Nov 28 10:18:41 crc kubenswrapper[4838]: I1128 10:18:41.057419 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47b8d231-2896-4d59-8d9d-c070770ced98-config-data\") pod \"nova-metadata-0\" (UID: \"47b8d231-2896-4d59-8d9d-c070770ced98\") " pod="openstack/nova-metadata-0" Nov 28 10:18:41 crc kubenswrapper[4838]: I1128 10:18:41.057628 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vn57f\" (UniqueName: \"kubernetes.io/projected/47b8d231-2896-4d59-8d9d-c070770ced98-kube-api-access-vn57f\") pod \"nova-metadata-0\" (UID: \"47b8d231-2896-4d59-8d9d-c070770ced98\") " pod="openstack/nova-metadata-0" Nov 28 10:18:41 crc kubenswrapper[4838]: I1128 10:18:41.159843 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47b8d231-2896-4d59-8d9d-c070770ced98-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"47b8d231-2896-4d59-8d9d-c070770ced98\") " pod="openstack/nova-metadata-0" Nov 28 10:18:41 crc kubenswrapper[4838]: I1128 10:18:41.159934 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47b8d231-2896-4d59-8d9d-c070770ced98-config-data\") pod \"nova-metadata-0\" (UID: \"47b8d231-2896-4d59-8d9d-c070770ced98\") " pod="openstack/nova-metadata-0" Nov 28 10:18:41 crc kubenswrapper[4838]: I1128 10:18:41.160269 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vn57f\" (UniqueName: \"kubernetes.io/projected/47b8d231-2896-4d59-8d9d-c070770ced98-kube-api-access-vn57f\") pod \"nova-metadata-0\" (UID: \"47b8d231-2896-4d59-8d9d-c070770ced98\") " 
pod="openstack/nova-metadata-0" Nov 28 10:18:41 crc kubenswrapper[4838]: I1128 10:18:41.160344 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/47b8d231-2896-4d59-8d9d-c070770ced98-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"47b8d231-2896-4d59-8d9d-c070770ced98\") " pod="openstack/nova-metadata-0" Nov 28 10:18:41 crc kubenswrapper[4838]: I1128 10:18:41.160482 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47b8d231-2896-4d59-8d9d-c070770ced98-logs\") pod \"nova-metadata-0\" (UID: \"47b8d231-2896-4d59-8d9d-c070770ced98\") " pod="openstack/nova-metadata-0" Nov 28 10:18:41 crc kubenswrapper[4838]: I1128 10:18:41.161133 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47b8d231-2896-4d59-8d9d-c070770ced98-logs\") pod \"nova-metadata-0\" (UID: \"47b8d231-2896-4d59-8d9d-c070770ced98\") " pod="openstack/nova-metadata-0" Nov 28 10:18:41 crc kubenswrapper[4838]: I1128 10:18:41.165467 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47b8d231-2896-4d59-8d9d-c070770ced98-config-data\") pod \"nova-metadata-0\" (UID: \"47b8d231-2896-4d59-8d9d-c070770ced98\") " pod="openstack/nova-metadata-0" Nov 28 10:18:41 crc kubenswrapper[4838]: I1128 10:18:41.165903 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/47b8d231-2896-4d59-8d9d-c070770ced98-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"47b8d231-2896-4d59-8d9d-c070770ced98\") " pod="openstack/nova-metadata-0" Nov 28 10:18:41 crc kubenswrapper[4838]: I1128 10:18:41.170927 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47b8d231-2896-4d59-8d9d-c070770ced98-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"47b8d231-2896-4d59-8d9d-c070770ced98\") " pod="openstack/nova-metadata-0" Nov 28 10:18:41 crc kubenswrapper[4838]: I1128 10:18:41.194615 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vn57f\" (UniqueName: \"kubernetes.io/projected/47b8d231-2896-4d59-8d9d-c070770ced98-kube-api-access-vn57f\") pod \"nova-metadata-0\" (UID: \"47b8d231-2896-4d59-8d9d-c070770ced98\") " pod="openstack/nova-metadata-0" Nov 28 10:18:41 crc kubenswrapper[4838]: I1128 10:18:41.294416 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 10:18:41 crc kubenswrapper[4838]: I1128 10:18:41.777883 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 10:18:41 crc kubenswrapper[4838]: W1128 10:18:41.796482 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod47b8d231_2896_4d59_8d9d_c070770ced98.slice/crio-d0f0cacbae5310f69836cde2a23561b23c9859402552012234a5a407bd3f2d82 WatchSource:0}: Error finding container d0f0cacbae5310f69836cde2a23561b23c9859402552012234a5a407bd3f2d82: Status 404 returned error can't find the container with id d0f0cacbae5310f69836cde2a23561b23c9859402552012234a5a407bd3f2d82 Nov 28 10:18:42 crc kubenswrapper[4838]: I1128 10:18:42.577063 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae83c476-db75-4cf4-a6b4-8ca846d96c2c" path="/var/lib/kubelet/pods/ae83c476-db75-4cf4-a6b4-8ca846d96c2c/volumes" Nov 28 10:18:42 crc kubenswrapper[4838]: I1128 10:18:42.624440 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"47b8d231-2896-4d59-8d9d-c070770ced98","Type":"ContainerStarted","Data":"d0f0cacbae5310f69836cde2a23561b23c9859402552012234a5a407bd3f2d82"} Nov 28 10:18:43 crc kubenswrapper[4838]: I1128 10:18:43.639130 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"47b8d231-2896-4d59-8d9d-c070770ced98","Type":"ContainerStarted","Data":"911b956f50f32d2a01591fafd01d693dbb268c7ce0c69d99a0a3ae7827494b89"} Nov 28 10:18:43 crc kubenswrapper[4838]: I1128 10:18:43.639511 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"47b8d231-2896-4d59-8d9d-c070770ced98","Type":"ContainerStarted","Data":"8e74bf28611276e3798e5cd576c1226f881251eb9cc589e1e1b17cd43de4f581"} Nov 28 10:18:43 crc kubenswrapper[4838]: I1128 10:18:43.642427 4838 generic.go:334] "Generic (PLEG): container finished" podID="c7190b24-4e06-4c59-9498-33d64a31067d" containerID="fac63343239611b1603ce78b83b375c44ea11140c340c65753db0a1c491d5f48" exitCode=0 Nov 28 10:18:43 crc kubenswrapper[4838]: I1128 10:18:43.642617 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-m8nk4" event={"ID":"c7190b24-4e06-4c59-9498-33d64a31067d","Type":"ContainerDied","Data":"fac63343239611b1603ce78b83b375c44ea11140c340c65753db0a1c491d5f48"} Nov 28 10:18:43 crc kubenswrapper[4838]: I1128 10:18:43.678651 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.678624997 podStartE2EDuration="3.678624997s" podCreationTimestamp="2025-11-28 10:18:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:18:43.665882264 +0000 UTC m=+1295.364856464" watchObservedRunningTime="2025-11-28 10:18:43.678624997 +0000 UTC m=+1295.377599177" Nov 28 10:18:44 crc kubenswrapper[4838]: I1128 10:18:44.867367 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 10:18:44 crc kubenswrapper[4838]: I1128 10:18:44.906032 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.059057 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-m8nk4" Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.133011 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.133090 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.160989 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkwwb\" (UniqueName: \"kubernetes.io/projected/c7190b24-4e06-4c59-9498-33d64a31067d-kube-api-access-mkwwb\") pod \"c7190b24-4e06-4c59-9498-33d64a31067d\" (UID: \"c7190b24-4e06-4c59-9498-33d64a31067d\") " Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.161123 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7190b24-4e06-4c59-9498-33d64a31067d-combined-ca-bundle\") pod \"c7190b24-4e06-4c59-9498-33d64a31067d\" (UID: \"c7190b24-4e06-4c59-9498-33d64a31067d\") " Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.161229 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7190b24-4e06-4c59-9498-33d64a31067d-config-data\") pod \"c7190b24-4e06-4c59-9498-33d64a31067d\" (UID: \"c7190b24-4e06-4c59-9498-33d64a31067d\") " Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.161311 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7190b24-4e06-4c59-9498-33d64a31067d-scripts\") pod \"c7190b24-4e06-4c59-9498-33d64a31067d\" (UID: \"c7190b24-4e06-4c59-9498-33d64a31067d\") " Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.167256 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7190b24-4e06-4c59-9498-33d64a31067d-kube-api-access-mkwwb" (OuterVolumeSpecName: "kube-api-access-mkwwb") pod "c7190b24-4e06-4c59-9498-33d64a31067d" (UID: "c7190b24-4e06-4c59-9498-33d64a31067d"). InnerVolumeSpecName "kube-api-access-mkwwb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.190786 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7190b24-4e06-4c59-9498-33d64a31067d-scripts" (OuterVolumeSpecName: "scripts") pod "c7190b24-4e06-4c59-9498-33d64a31067d" (UID: "c7190b24-4e06-4c59-9498-33d64a31067d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.196095 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-566b5b7845-d47fb" Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.205975 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7190b24-4e06-4c59-9498-33d64a31067d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c7190b24-4e06-4c59-9498-33d64a31067d" (UID: "c7190b24-4e06-4c59-9498-33d64a31067d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.235835 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7190b24-4e06-4c59-9498-33d64a31067d-config-data" (OuterVolumeSpecName: "config-data") pod "c7190b24-4e06-4c59-9498-33d64a31067d" (UID: "c7190b24-4e06-4c59-9498-33d64a31067d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.263614 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7190b24-4e06-4c59-9498-33d64a31067d-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.263899 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkwwb\" (UniqueName: \"kubernetes.io/projected/c7190b24-4e06-4c59-9498-33d64a31067d-kube-api-access-mkwwb\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.263989 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7190b24-4e06-4c59-9498-33d64a31067d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.264067 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7190b24-4e06-4c59-9498-33d64a31067d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.287836 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"] Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.288395 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr" podUID="4a49a5cf-ca4f-4e48-bec8-8d5a728d8025" containerName="dnsmasq-dns" containerID="cri-o://495079ccddde65fce19fddf29ac8785094e6ce65cdbd107c52813d57e5d4c8ff" gracePeriod=10 Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.586224 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.663285 4838 generic.go:334] "Generic (PLEG): container finished" podID="4a49a5cf-ca4f-4e48-bec8-8d5a728d8025" containerID="495079ccddde65fce19fddf29ac8785094e6ce65cdbd107c52813d57e5d4c8ff" exitCode=0 Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.663360 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr" event={"ID":"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025","Type":"ContainerDied","Data":"495079ccddde65fce19fddf29ac8785094e6ce65cdbd107c52813d57e5d4c8ff"} Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.664694 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-m8nk4" event={"ID":"c7190b24-4e06-4c59-9498-33d64a31067d","Type":"ContainerDied","Data":"391a9bb8e0bcdb4ec5ea4e2fcbb73df8f2969f2cc799f5b88833977c2b43b804"} Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.664741 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="391a9bb8e0bcdb4ec5ea4e2fcbb73df8f2969f2cc799f5b88833977c2b43b804" Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.664831 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-m8nk4" Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.672234 4838 generic.go:334] "Generic (PLEG): container finished" podID="2a6d8b81-2f74-472c-b289-f08fe548ec2f" containerID="0a4317f4ad9fe07c39b10f20cae3421c50a515a76bf5fc88db86705e2bdf6788" exitCode=0 Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.672569 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-p7qxj" event={"ID":"2a6d8b81-2f74-472c-b289-f08fe548ec2f","Type":"ContainerDied","Data":"0a4317f4ad9fe07c39b10f20cae3421c50a515a76bf5fc88db86705e2bdf6788"} Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.707211 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.898603 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.898892 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="1e67c380-458a-497c-9230-49b8875934fd" containerName="nova-api-log" containerID="cri-o://e020355e0736927bc17c9faa3ba0f126d89347c5f4ed51b81e584db36042fa25" gracePeriod=30 Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.899405 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="1e67c380-458a-497c-9230-49b8875934fd" containerName="nova-api-api" containerID="cri-o://822d5243ce8b193c4fb551e5e472d329d41f24450f9d6f270a6eda16010197d6" gracePeriod=30 Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.906424 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="1e67c380-458a-497c-9230-49b8875934fd" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.170:8774/\": EOF" Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.906500 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="1e67c380-458a-497c-9230-49b8875934fd" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.170:8774/\": EOF" Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.936084 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.936620 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="47b8d231-2896-4d59-8d9d-c070770ced98" containerName="nova-metadata-log" containerID="cri-o://8e74bf28611276e3798e5cd576c1226f881251eb9cc589e1e1b17cd43de4f581" gracePeriod=30 Nov 28 10:18:45 crc kubenswrapper[4838]: I1128 10:18:45.937156 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="47b8d231-2896-4d59-8d9d-c070770ced98" containerName="nova-metadata-metadata" containerID="cri-o://911b956f50f32d2a01591fafd01d693dbb268c7ce0c69d99a0a3ae7827494b89" gracePeriod=30 Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.095852 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.283772 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-dns-svc\") pod \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\" (UID: \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\") " Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.283877 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-config\") pod \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\" (UID: \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\") " Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.283902 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-ovsdbserver-sb\") pod \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\" (UID: \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\") " Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.283927 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-ovsdbserver-nb\") pod \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\" (UID: \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\") " Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.283961 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m6xkl\" (UniqueName: \"kubernetes.io/projected/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-kube-api-access-m6xkl\") pod \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\" (UID: \"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025\") " Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.306826 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.306885 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.323340 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-kube-api-access-m6xkl" (OuterVolumeSpecName: "kube-api-access-m6xkl") pod "4a49a5cf-ca4f-4e48-bec8-8d5a728d8025" (UID: "4a49a5cf-ca4f-4e48-bec8-8d5a728d8025"). InnerVolumeSpecName "kube-api-access-m6xkl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.370633 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.378474 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4a49a5cf-ca4f-4e48-bec8-8d5a728d8025" (UID: "4a49a5cf-ca4f-4e48-bec8-8d5a728d8025"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.387400 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m6xkl\" (UniqueName: \"kubernetes.io/projected/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-kube-api-access-m6xkl\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.387431 4838 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.406992 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4a49a5cf-ca4f-4e48-bec8-8d5a728d8025" (UID: "4a49a5cf-ca4f-4e48-bec8-8d5a728d8025"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.410677 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4a49a5cf-ca4f-4e48-bec8-8d5a728d8025" (UID: "4a49a5cf-ca4f-4e48-bec8-8d5a728d8025"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.431642 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-config" (OuterVolumeSpecName: "config") pod "4a49a5cf-ca4f-4e48-bec8-8d5a728d8025" (UID: "4a49a5cf-ca4f-4e48-bec8-8d5a728d8025"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.489345 4838 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.489391 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.489403 4838 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.621879 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.693981 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.694146 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d97fcdd8f-j2pfr" event={"ID":"4a49a5cf-ca4f-4e48-bec8-8d5a728d8025","Type":"ContainerDied","Data":"eeb26e60d3bd0156e8399c98482b36bba0241e0811dbedd827ba4c6743006b6c"} Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.694230 4838 scope.go:117] "RemoveContainer" containerID="495079ccddde65fce19fddf29ac8785094e6ce65cdbd107c52813d57e5d4c8ff" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.697408 4838 generic.go:334] "Generic (PLEG): container finished" podID="1e67c380-458a-497c-9230-49b8875934fd" containerID="e020355e0736927bc17c9faa3ba0f126d89347c5f4ed51b81e584db36042fa25" exitCode=143 Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.697511 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1e67c380-458a-497c-9230-49b8875934fd","Type":"ContainerDied","Data":"e020355e0736927bc17c9faa3ba0f126d89347c5f4ed51b81e584db36042fa25"} Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.703682 4838 generic.go:334] "Generic (PLEG): container finished" podID="47b8d231-2896-4d59-8d9d-c070770ced98" containerID="911b956f50f32d2a01591fafd01d693dbb268c7ce0c69d99a0a3ae7827494b89" exitCode=0 Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.703708 4838 generic.go:334] "Generic (PLEG): container finished" podID="47b8d231-2896-4d59-8d9d-c070770ced98" containerID="8e74bf28611276e3798e5cd576c1226f881251eb9cc589e1e1b17cd43de4f581" exitCode=143 Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.703760 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.703764 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"47b8d231-2896-4d59-8d9d-c070770ced98","Type":"ContainerDied","Data":"911b956f50f32d2a01591fafd01d693dbb268c7ce0c69d99a0a3ae7827494b89"} Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.703811 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"47b8d231-2896-4d59-8d9d-c070770ced98","Type":"ContainerDied","Data":"8e74bf28611276e3798e5cd576c1226f881251eb9cc589e1e1b17cd43de4f581"} Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.703827 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"47b8d231-2896-4d59-8d9d-c070770ced98","Type":"ContainerDied","Data":"d0f0cacbae5310f69836cde2a23561b23c9859402552012234a5a407bd3f2d82"} Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.717986 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"] Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.723352 4838 scope.go:117] "RemoveContainer" containerID="9841777dfd5f372db2627f815f5f99aba9243f8c35abda8e632440b50e23a98c" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.726192 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d97fcdd8f-j2pfr"] Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.744607 4838 scope.go:117] "RemoveContainer" containerID="911b956f50f32d2a01591fafd01d693dbb268c7ce0c69d99a0a3ae7827494b89" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.766533 4838 scope.go:117] "RemoveContainer" 
containerID="8e74bf28611276e3798e5cd576c1226f881251eb9cc589e1e1b17cd43de4f581" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.786444 4838 scope.go:117] "RemoveContainer" containerID="911b956f50f32d2a01591fafd01d693dbb268c7ce0c69d99a0a3ae7827494b89" Nov 28 10:18:46 crc kubenswrapper[4838]: E1128 10:18:46.791148 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"911b956f50f32d2a01591fafd01d693dbb268c7ce0c69d99a0a3ae7827494b89\": container with ID starting with 911b956f50f32d2a01591fafd01d693dbb268c7ce0c69d99a0a3ae7827494b89 not found: ID does not exist" containerID="911b956f50f32d2a01591fafd01d693dbb268c7ce0c69d99a0a3ae7827494b89" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.791195 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"911b956f50f32d2a01591fafd01d693dbb268c7ce0c69d99a0a3ae7827494b89"} err="failed to get container status \"911b956f50f32d2a01591fafd01d693dbb268c7ce0c69d99a0a3ae7827494b89\": rpc error: code = NotFound desc = could not find container \"911b956f50f32d2a01591fafd01d693dbb268c7ce0c69d99a0a3ae7827494b89\": container with ID starting with 911b956f50f32d2a01591fafd01d693dbb268c7ce0c69d99a0a3ae7827494b89 not found: ID does not exist" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.791221 4838 scope.go:117] "RemoveContainer" containerID="8e74bf28611276e3798e5cd576c1226f881251eb9cc589e1e1b17cd43de4f581" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.793734 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47b8d231-2896-4d59-8d9d-c070770ced98-combined-ca-bundle\") pod \"47b8d231-2896-4d59-8d9d-c070770ced98\" (UID: \"47b8d231-2896-4d59-8d9d-c070770ced98\") " Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.793962 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47b8d231-2896-4d59-8d9d-c070770ced98-config-data\") pod \"47b8d231-2896-4d59-8d9d-c070770ced98\" (UID: \"47b8d231-2896-4d59-8d9d-c070770ced98\") " Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.794005 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47b8d231-2896-4d59-8d9d-c070770ced98-logs\") pod \"47b8d231-2896-4d59-8d9d-c070770ced98\" (UID: \"47b8d231-2896-4d59-8d9d-c070770ced98\") " Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.794031 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/47b8d231-2896-4d59-8d9d-c070770ced98-nova-metadata-tls-certs\") pod \"47b8d231-2896-4d59-8d9d-c070770ced98\" (UID: \"47b8d231-2896-4d59-8d9d-c070770ced98\") " Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.794078 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vn57f\" (UniqueName: \"kubernetes.io/projected/47b8d231-2896-4d59-8d9d-c070770ced98-kube-api-access-vn57f\") pod \"47b8d231-2896-4d59-8d9d-c070770ced98\" (UID: \"47b8d231-2896-4d59-8d9d-c070770ced98\") " Nov 28 10:18:46 crc kubenswrapper[4838]: E1128 10:18:46.795327 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e74bf28611276e3798e5cd576c1226f881251eb9cc589e1e1b17cd43de4f581\": container with ID 
starting with 8e74bf28611276e3798e5cd576c1226f881251eb9cc589e1e1b17cd43de4f581 not found: ID does not exist" containerID="8e74bf28611276e3798e5cd576c1226f881251eb9cc589e1e1b17cd43de4f581" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.795360 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e74bf28611276e3798e5cd576c1226f881251eb9cc589e1e1b17cd43de4f581"} err="failed to get container status \"8e74bf28611276e3798e5cd576c1226f881251eb9cc589e1e1b17cd43de4f581\": rpc error: code = NotFound desc = could not find container \"8e74bf28611276e3798e5cd576c1226f881251eb9cc589e1e1b17cd43de4f581\": container with ID starting with 8e74bf28611276e3798e5cd576c1226f881251eb9cc589e1e1b17cd43de4f581 not found: ID does not exist" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.795382 4838 scope.go:117] "RemoveContainer" containerID="911b956f50f32d2a01591fafd01d693dbb268c7ce0c69d99a0a3ae7827494b89" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.796171 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47b8d231-2896-4d59-8d9d-c070770ced98-logs" (OuterVolumeSpecName: "logs") pod "47b8d231-2896-4d59-8d9d-c070770ced98" (UID: "47b8d231-2896-4d59-8d9d-c070770ced98"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.797327 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"911b956f50f32d2a01591fafd01d693dbb268c7ce0c69d99a0a3ae7827494b89"} err="failed to get container status \"911b956f50f32d2a01591fafd01d693dbb268c7ce0c69d99a0a3ae7827494b89\": rpc error: code = NotFound desc = could not find container \"911b956f50f32d2a01591fafd01d693dbb268c7ce0c69d99a0a3ae7827494b89\": container with ID starting with 911b956f50f32d2a01591fafd01d693dbb268c7ce0c69d99a0a3ae7827494b89 not found: ID does not exist" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.797375 4838 scope.go:117] "RemoveContainer" containerID="8e74bf28611276e3798e5cd576c1226f881251eb9cc589e1e1b17cd43de4f581" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.798496 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47b8d231-2896-4d59-8d9d-c070770ced98-kube-api-access-vn57f" (OuterVolumeSpecName: "kube-api-access-vn57f") pod "47b8d231-2896-4d59-8d9d-c070770ced98" (UID: "47b8d231-2896-4d59-8d9d-c070770ced98"). InnerVolumeSpecName "kube-api-access-vn57f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.801017 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e74bf28611276e3798e5cd576c1226f881251eb9cc589e1e1b17cd43de4f581"} err="failed to get container status \"8e74bf28611276e3798e5cd576c1226f881251eb9cc589e1e1b17cd43de4f581\": rpc error: code = NotFound desc = could not find container \"8e74bf28611276e3798e5cd576c1226f881251eb9cc589e1e1b17cd43de4f581\": container with ID starting with 8e74bf28611276e3798e5cd576c1226f881251eb9cc589e1e1b17cd43de4f581 not found: ID does not exist" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.825533 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47b8d231-2896-4d59-8d9d-c070770ced98-config-data" (OuterVolumeSpecName: "config-data") pod "47b8d231-2896-4d59-8d9d-c070770ced98" (UID: "47b8d231-2896-4d59-8d9d-c070770ced98"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.827886 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47b8d231-2896-4d59-8d9d-c070770ced98-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "47b8d231-2896-4d59-8d9d-c070770ced98" (UID: "47b8d231-2896-4d59-8d9d-c070770ced98"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.845959 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47b8d231-2896-4d59-8d9d-c070770ced98-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "47b8d231-2896-4d59-8d9d-c070770ced98" (UID: "47b8d231-2896-4d59-8d9d-c070770ced98"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.896766 4838 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47b8d231-2896-4d59-8d9d-c070770ced98-logs\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.896795 4838 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/47b8d231-2896-4d59-8d9d-c070770ced98-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.896807 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vn57f\" (UniqueName: \"kubernetes.io/projected/47b8d231-2896-4d59-8d9d-c070770ced98-kube-api-access-vn57f\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.896816 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47b8d231-2896-4d59-8d9d-c070770ced98-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:46 crc kubenswrapper[4838]: I1128 10:18:46.896825 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47b8d231-2896-4d59-8d9d-c070770ced98-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.086142 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-p7qxj" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.106132 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.114418 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.136262 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 10:18:47 crc kubenswrapper[4838]: E1128 10:18:47.137767 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47b8d231-2896-4d59-8d9d-c070770ced98" containerName="nova-metadata-metadata" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.137785 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="47b8d231-2896-4d59-8d9d-c070770ced98" containerName="nova-metadata-metadata" Nov 28 10:18:47 crc kubenswrapper[4838]: E1128 10:18:47.137805 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a6d8b81-2f74-472c-b289-f08fe548ec2f" containerName="nova-cell1-conductor-db-sync" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.137811 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a6d8b81-2f74-472c-b289-f08fe548ec2f" containerName="nova-cell1-conductor-db-sync" Nov 28 10:18:47 crc kubenswrapper[4838]: E1128 10:18:47.137823 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a49a5cf-ca4f-4e48-bec8-8d5a728d8025" containerName="dnsmasq-dns" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.137829 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a49a5cf-ca4f-4e48-bec8-8d5a728d8025" containerName="dnsmasq-dns" Nov 28 10:18:47 crc kubenswrapper[4838]: E1128 10:18:47.137848 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7190b24-4e06-4c59-9498-33d64a31067d" containerName="nova-manage" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.137854 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7190b24-4e06-4c59-9498-33d64a31067d" containerName="nova-manage" Nov 28 10:18:47 crc kubenswrapper[4838]: E1128 10:18:47.137875 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a49a5cf-ca4f-4e48-bec8-8d5a728d8025" containerName="init" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.137880 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a49a5cf-ca4f-4e48-bec8-8d5a728d8025" containerName="init" Nov 28 10:18:47 crc kubenswrapper[4838]: E1128 10:18:47.137888 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47b8d231-2896-4d59-8d9d-c070770ced98" containerName="nova-metadata-log" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.137894 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="47b8d231-2896-4d59-8d9d-c070770ced98" containerName="nova-metadata-log" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.138043 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7190b24-4e06-4c59-9498-33d64a31067d" containerName="nova-manage" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.138054 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="47b8d231-2896-4d59-8d9d-c070770ced98" containerName="nova-metadata-log" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.138067 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a6d8b81-2f74-472c-b289-f08fe548ec2f" containerName="nova-cell1-conductor-db-sync" Nov 28 10:18:47 crc 
kubenswrapper[4838]: I1128 10:18:47.138078 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="47b8d231-2896-4d59-8d9d-c070770ced98" containerName="nova-metadata-metadata" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.138091 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a49a5cf-ca4f-4e48-bec8-8d5a728d8025" containerName="dnsmasq-dns" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.139018 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.145256 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.145285 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.148832 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.200528 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dknw8\" (UniqueName: \"kubernetes.io/projected/2a6d8b81-2f74-472c-b289-f08fe548ec2f-kube-api-access-dknw8\") pod \"2a6d8b81-2f74-472c-b289-f08fe548ec2f\" (UID: \"2a6d8b81-2f74-472c-b289-f08fe548ec2f\") " Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.200683 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a6d8b81-2f74-472c-b289-f08fe548ec2f-scripts\") pod \"2a6d8b81-2f74-472c-b289-f08fe548ec2f\" (UID: \"2a6d8b81-2f74-472c-b289-f08fe548ec2f\") " Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.200787 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a6d8b81-2f74-472c-b289-f08fe548ec2f-config-data\") pod \"2a6d8b81-2f74-472c-b289-f08fe548ec2f\" (UID: \"2a6d8b81-2f74-472c-b289-f08fe548ec2f\") " Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.200830 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a6d8b81-2f74-472c-b289-f08fe548ec2f-combined-ca-bundle\") pod \"2a6d8b81-2f74-472c-b289-f08fe548ec2f\" (UID: \"2a6d8b81-2f74-472c-b289-f08fe548ec2f\") " Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.210895 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a6d8b81-2f74-472c-b289-f08fe548ec2f-kube-api-access-dknw8" (OuterVolumeSpecName: "kube-api-access-dknw8") pod "2a6d8b81-2f74-472c-b289-f08fe548ec2f" (UID: "2a6d8b81-2f74-472c-b289-f08fe548ec2f"). InnerVolumeSpecName "kube-api-access-dknw8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.221666 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a6d8b81-2f74-472c-b289-f08fe548ec2f-scripts" (OuterVolumeSpecName: "scripts") pod "2a6d8b81-2f74-472c-b289-f08fe548ec2f" (UID: "2a6d8b81-2f74-472c-b289-f08fe548ec2f"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:18:47 crc kubenswrapper[4838]: E1128 10:18:47.235257 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2a6d8b81-2f74-472c-b289-f08fe548ec2f-config-data podName:2a6d8b81-2f74-472c-b289-f08fe548ec2f nodeName:}" failed. No retries permitted until 2025-11-28 10:18:47.735232059 +0000 UTC m=+1299.434206219 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "config-data" (UniqueName: "kubernetes.io/secret/2a6d8b81-2f74-472c-b289-f08fe548ec2f-config-data") pod "2a6d8b81-2f74-472c-b289-f08fe548ec2f" (UID: "2a6d8b81-2f74-472c-b289-f08fe548ec2f") : error deleting /var/lib/kubelet/pods/2a6d8b81-2f74-472c-b289-f08fe548ec2f/volume-subpaths: remove /var/lib/kubelet/pods/2a6d8b81-2f74-472c-b289-f08fe548ec2f/volume-subpaths: no such file or directory Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.237842 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a6d8b81-2f74-472c-b289-f08fe548ec2f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2a6d8b81-2f74-472c-b289-f08fe548ec2f" (UID: "2a6d8b81-2f74-472c-b289-f08fe548ec2f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.304001 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\") " pod="openstack/nova-metadata-0" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.304039 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-config-data\") pod \"nova-metadata-0\" (UID: \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\") " pod="openstack/nova-metadata-0" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.304117 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-logs\") pod \"nova-metadata-0\" (UID: \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\") " pod="openstack/nova-metadata-0" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.304161 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82mgz\" (UniqueName: \"kubernetes.io/projected/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-kube-api-access-82mgz\") pod \"nova-metadata-0\" (UID: \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\") " pod="openstack/nova-metadata-0" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.304212 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\") " pod="openstack/nova-metadata-0" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.304256 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a6d8b81-2f74-472c-b289-f08fe548ec2f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 
10:18:47.304267 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dknw8\" (UniqueName: \"kubernetes.io/projected/2a6d8b81-2f74-472c-b289-f08fe548ec2f-kube-api-access-dknw8\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.304278 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a6d8b81-2f74-472c-b289-f08fe548ec2f-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.406082 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82mgz\" (UniqueName: \"kubernetes.io/projected/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-kube-api-access-82mgz\") pod \"nova-metadata-0\" (UID: \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\") " pod="openstack/nova-metadata-0" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.406217 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\") " pod="openstack/nova-metadata-0" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.406326 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\") " pod="openstack/nova-metadata-0" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.406381 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-config-data\") pod \"nova-metadata-0\" (UID: \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\") " pod="openstack/nova-metadata-0" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.406482 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-logs\") pod \"nova-metadata-0\" (UID: \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\") " pod="openstack/nova-metadata-0" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.407159 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-logs\") pod \"nova-metadata-0\" (UID: \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\") " pod="openstack/nova-metadata-0" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.412861 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-config-data\") pod \"nova-metadata-0\" (UID: \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\") " pod="openstack/nova-metadata-0" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.416312 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\") " pod="openstack/nova-metadata-0" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.426049 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82mgz\" (UniqueName: 
\"kubernetes.io/projected/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-kube-api-access-82mgz\") pod \"nova-metadata-0\" (UID: \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\") " pod="openstack/nova-metadata-0" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.426183 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\") " pod="openstack/nova-metadata-0" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.480660 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.774295 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.776818 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.786344 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-p7qxj" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.786618 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-p7qxj" event={"ID":"2a6d8b81-2f74-472c-b289-f08fe548ec2f","Type":"ContainerDied","Data":"203951f17f0c82bd0b65c20aaef45b5b38e30d79620b510afe73fffbafa8336e"} Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.786644 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="203951f17f0c82bd0b65c20aaef45b5b38e30d79620b510afe73fffbafa8336e" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.786400 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="4b719d62-400b-4141-8687-9fb7bf60e64a" containerName="nova-scheduler-scheduler" containerID="cri-o://e922688f8823b2cc6bacf2a12c0bedea1a62e7155a3f56fa3d534e9b8165ba3e" gracePeriod=30 Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.790670 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.850196 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a6d8b81-2f74-472c-b289-f08fe548ec2f-config-data\") pod \"2a6d8b81-2f74-472c-b289-f08fe548ec2f\" (UID: \"2a6d8b81-2f74-472c-b289-f08fe548ec2f\") " Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.856019 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a6d8b81-2f74-472c-b289-f08fe548ec2f-config-data" (OuterVolumeSpecName: "config-data") pod "2a6d8b81-2f74-472c-b289-f08fe548ec2f" (UID: "2a6d8b81-2f74-472c-b289-f08fe548ec2f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.916556 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.952314 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h9mng\" (UniqueName: \"kubernetes.io/projected/88e49157-0dd0-455d-a9bd-5a13c3d95087-kube-api-access-h9mng\") pod \"nova-cell1-conductor-0\" (UID: \"88e49157-0dd0-455d-a9bd-5a13c3d95087\") " pod="openstack/nova-cell1-conductor-0" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.952434 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88e49157-0dd0-455d-a9bd-5a13c3d95087-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"88e49157-0dd0-455d-a9bd-5a13c3d95087\") " pod="openstack/nova-cell1-conductor-0" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.953328 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88e49157-0dd0-455d-a9bd-5a13c3d95087-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"88e49157-0dd0-455d-a9bd-5a13c3d95087\") " pod="openstack/nova-cell1-conductor-0" Nov 28 10:18:47 crc kubenswrapper[4838]: I1128 10:18:47.953415 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a6d8b81-2f74-472c-b289-f08fe548ec2f-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.055244 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88e49157-0dd0-455d-a9bd-5a13c3d95087-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"88e49157-0dd0-455d-a9bd-5a13c3d95087\") " pod="openstack/nova-cell1-conductor-0" Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.055306 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h9mng\" (UniqueName: \"kubernetes.io/projected/88e49157-0dd0-455d-a9bd-5a13c3d95087-kube-api-access-h9mng\") pod \"nova-cell1-conductor-0\" (UID: \"88e49157-0dd0-455d-a9bd-5a13c3d95087\") " pod="openstack/nova-cell1-conductor-0" Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.055354 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88e49157-0dd0-455d-a9bd-5a13c3d95087-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"88e49157-0dd0-455d-a9bd-5a13c3d95087\") " pod="openstack/nova-cell1-conductor-0" Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.058874 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88e49157-0dd0-455d-a9bd-5a13c3d95087-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"88e49157-0dd0-455d-a9bd-5a13c3d95087\") " pod="openstack/nova-cell1-conductor-0" Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.059766 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88e49157-0dd0-455d-a9bd-5a13c3d95087-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"88e49157-0dd0-455d-a9bd-5a13c3d95087\") " pod="openstack/nova-cell1-conductor-0" Nov 28 10:18:48 
crc kubenswrapper[4838]: I1128 10:18:48.071856 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h9mng\" (UniqueName: \"kubernetes.io/projected/88e49157-0dd0-455d-a9bd-5a13c3d95087-kube-api-access-h9mng\") pod \"nova-cell1-conductor-0\" (UID: \"88e49157-0dd0-455d-a9bd-5a13c3d95087\") " pod="openstack/nova-cell1-conductor-0" Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.104855 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.403932 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.404454 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="229a7c23-7909-4e77-bfa9-92d7d4f0f0eb" containerName="kube-state-metrics" containerID="cri-o://6e9254aec5f2e5d8018bdc7354b5d1fbaa00f511bf8312253bd9b4c3cc48b94d" gracePeriod=30 Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.557920 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 10:18:48 crc kubenswrapper[4838]: W1128 10:18:48.561119 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88e49157_0dd0_455d_a9bd_5a13c3d95087.slice/crio-86373101aafe46df4293397c9097c434c43e00bce6370769d0c3261f30d480a7 WatchSource:0}: Error finding container 86373101aafe46df4293397c9097c434c43e00bce6370769d0c3261f30d480a7: Status 404 returned error can't find the container with id 86373101aafe46df4293397c9097c434c43e00bce6370769d0c3261f30d480a7 Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.577620 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47b8d231-2896-4d59-8d9d-c070770ced98" path="/var/lib/kubelet/pods/47b8d231-2896-4d59-8d9d-c070770ced98/volumes" Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.578673 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a49a5cf-ca4f-4e48-bec8-8d5a728d8025" path="/var/lib/kubelet/pods/4a49a5cf-ca4f-4e48-bec8-8d5a728d8025/volumes" Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.798258 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.798549 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a","Type":"ContainerStarted","Data":"ac9417a62df7e5d09d56e9f2081e1f4926e36360fbd4f73a8b599f3615ebe4ad"} Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.798578 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a","Type":"ContainerStarted","Data":"931e5c14cb8a43f4a31e0931a3d13a23e05d2b737e6ce291e48d165f15dea03f"} Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.798589 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a","Type":"ContainerStarted","Data":"b6fb30e743b20441b7bd4d2616ce86f82f8de764089d51a9f4121ec70c5c1324"} Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.799842 4838 generic.go:334] "Generic (PLEG): container finished" podID="229a7c23-7909-4e77-bfa9-92d7d4f0f0eb" containerID="6e9254aec5f2e5d8018bdc7354b5d1fbaa00f511bf8312253bd9b4c3cc48b94d" exitCode=2 Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.799871 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"229a7c23-7909-4e77-bfa9-92d7d4f0f0eb","Type":"ContainerDied","Data":"6e9254aec5f2e5d8018bdc7354b5d1fbaa00f511bf8312253bd9b4c3cc48b94d"} Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.799902 4838 scope.go:117] "RemoveContainer" containerID="6e9254aec5f2e5d8018bdc7354b5d1fbaa00f511bf8312253bd9b4c3cc48b94d" Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.799899 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.801699 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"88e49157-0dd0-455d-a9bd-5a13c3d95087","Type":"ContainerStarted","Data":"ef93377ed0037be6136a19a3fe2099a142bbb4eed7152c4ca949cca3e20e2de5"} Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.801733 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"88e49157-0dd0-455d-a9bd-5a13c3d95087","Type":"ContainerStarted","Data":"86373101aafe46df4293397c9097c434c43e00bce6370769d0c3261f30d480a7"} Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.802026 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.842534 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=1.8425156710000001 podStartE2EDuration="1.842515671s" podCreationTimestamp="2025-11-28 10:18:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:18:48.835162298 +0000 UTC m=+1300.534136468" watchObservedRunningTime="2025-11-28 10:18:48.842515671 +0000 UTC m=+1300.541489841" Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.864621 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=1.8646049740000001 podStartE2EDuration="1.864604974s" podCreationTimestamp="2025-11-28 10:18:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:18:48.858278768 +0000 UTC m=+1300.557252938" watchObservedRunningTime="2025-11-28 10:18:48.864604974 +0000 UTC m=+1300.563579134" Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.970055 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qd8rc\" (UniqueName: \"kubernetes.io/projected/229a7c23-7909-4e77-bfa9-92d7d4f0f0eb-kube-api-access-qd8rc\") pod \"229a7c23-7909-4e77-bfa9-92d7d4f0f0eb\" (UID: \"229a7c23-7909-4e77-bfa9-92d7d4f0f0eb\") " Nov 28 10:18:48 crc kubenswrapper[4838]: I1128 10:18:48.976224 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/229a7c23-7909-4e77-bfa9-92d7d4f0f0eb-kube-api-access-qd8rc" (OuterVolumeSpecName: "kube-api-access-qd8rc") pod "229a7c23-7909-4e77-bfa9-92d7d4f0f0eb" (UID: "229a7c23-7909-4e77-bfa9-92d7d4f0f0eb"). InnerVolumeSpecName "kube-api-access-qd8rc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.072199 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qd8rc\" (UniqueName: \"kubernetes.io/projected/229a7c23-7909-4e77-bfa9-92d7d4f0f0eb-kube-api-access-qd8rc\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.133027 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.140607 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.152246 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 10:18:49 crc kubenswrapper[4838]: E1128 10:18:49.152592 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="229a7c23-7909-4e77-bfa9-92d7d4f0f0eb" containerName="kube-state-metrics" Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.152608 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="229a7c23-7909-4e77-bfa9-92d7d4f0f0eb" containerName="kube-state-metrics" Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.152835 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="229a7c23-7909-4e77-bfa9-92d7d4f0f0eb" containerName="kube-state-metrics" Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.153345 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.157340 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.157689 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.164109 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.288067 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d3e1aba-11d2-478c-9715-49ba175c7b03-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"9d3e1aba-11d2-478c-9715-49ba175c7b03\") " pod="openstack/kube-state-metrics-0" Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.288126 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/9d3e1aba-11d2-478c-9715-49ba175c7b03-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"9d3e1aba-11d2-478c-9715-49ba175c7b03\") " pod="openstack/kube-state-metrics-0" Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.288239 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x74jg\" (UniqueName: \"kubernetes.io/projected/9d3e1aba-11d2-478c-9715-49ba175c7b03-kube-api-access-x74jg\") pod \"kube-state-metrics-0\" (UID: \"9d3e1aba-11d2-478c-9715-49ba175c7b03\") " pod="openstack/kube-state-metrics-0" Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.288287 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/9d3e1aba-11d2-478c-9715-49ba175c7b03-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"9d3e1aba-11d2-478c-9715-49ba175c7b03\") " pod="openstack/kube-state-metrics-0" Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.389875 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x74jg\" (UniqueName: \"kubernetes.io/projected/9d3e1aba-11d2-478c-9715-49ba175c7b03-kube-api-access-x74jg\") pod \"kube-state-metrics-0\" (UID: \"9d3e1aba-11d2-478c-9715-49ba175c7b03\") " pod="openstack/kube-state-metrics-0" Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.389958 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d3e1aba-11d2-478c-9715-49ba175c7b03-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"9d3e1aba-11d2-478c-9715-49ba175c7b03\") " pod="openstack/kube-state-metrics-0" Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.390059 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d3e1aba-11d2-478c-9715-49ba175c7b03-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"9d3e1aba-11d2-478c-9715-49ba175c7b03\") " pod="openstack/kube-state-metrics-0" Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.390094 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/9d3e1aba-11d2-478c-9715-49ba175c7b03-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"9d3e1aba-11d2-478c-9715-49ba175c7b03\") " pod="openstack/kube-state-metrics-0" Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.393675 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d3e1aba-11d2-478c-9715-49ba175c7b03-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"9d3e1aba-11d2-478c-9715-49ba175c7b03\") " pod="openstack/kube-state-metrics-0" Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.401263 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/9d3e1aba-11d2-478c-9715-49ba175c7b03-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"9d3e1aba-11d2-478c-9715-49ba175c7b03\") " pod="openstack/kube-state-metrics-0" Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.401982 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d3e1aba-11d2-478c-9715-49ba175c7b03-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"9d3e1aba-11d2-478c-9715-49ba175c7b03\") " pod="openstack/kube-state-metrics-0" Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.406666 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x74jg\" (UniqueName: \"kubernetes.io/projected/9d3e1aba-11d2-478c-9715-49ba175c7b03-kube-api-access-x74jg\") pod \"kube-state-metrics-0\" (UID: \"9d3e1aba-11d2-478c-9715-49ba175c7b03\") " pod="openstack/kube-state-metrics-0" Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.490892 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.503403 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.503660 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7a57f595-4e5d-4ba1-80b9-f088c464d19b" containerName="ceilometer-central-agent" containerID="cri-o://cdbde60a6c838ba6fb999d6f8f84bbd37709c136916740ee6799a3c32c048b6e" gracePeriod=30 Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.504148 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7a57f595-4e5d-4ba1-80b9-f088c464d19b" containerName="proxy-httpd" containerID="cri-o://8b0b0a562d211dc19cc048c9613d399453345ea08642ae5b2156129635eb26d4" gracePeriod=30 Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.504198 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7a57f595-4e5d-4ba1-80b9-f088c464d19b" containerName="sg-core" containerID="cri-o://344dfd80495c7c8e55b04f0caab130aed567f1b14b83eab62798042e5d86b78f" gracePeriod=30 Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.504230 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7a57f595-4e5d-4ba1-80b9-f088c464d19b" containerName="ceilometer-notification-agent" containerID="cri-o://aed9a7a101be420df7f969904281c5a8cd1c385df3f5786132979ad3958d38e1" gracePeriod=30 Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.814551 4838 generic.go:334] "Generic (PLEG): container finished" podID="7a57f595-4e5d-4ba1-80b9-f088c464d19b" containerID="8b0b0a562d211dc19cc048c9613d399453345ea08642ae5b2156129635eb26d4" exitCode=0 Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.814592 4838 generic.go:334] "Generic (PLEG): container finished" podID="7a57f595-4e5d-4ba1-80b9-f088c464d19b" containerID="344dfd80495c7c8e55b04f0caab130aed567f1b14b83eab62798042e5d86b78f" exitCode=2 Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.814636 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a57f595-4e5d-4ba1-80b9-f088c464d19b","Type":"ContainerDied","Data":"8b0b0a562d211dc19cc048c9613d399453345ea08642ae5b2156129635eb26d4"} Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.814667 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a57f595-4e5d-4ba1-80b9-f088c464d19b","Type":"ContainerDied","Data":"344dfd80495c7c8e55b04f0caab130aed567f1b14b83eab62798042e5d86b78f"} Nov 28 10:18:49 crc kubenswrapper[4838]: E1128 10:18:49.869305 4838 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e922688f8823b2cc6bacf2a12c0bedea1a62e7155a3f56fa3d534e9b8165ba3e" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 10:18:49 crc kubenswrapper[4838]: E1128 10:18:49.871375 4838 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e922688f8823b2cc6bacf2a12c0bedea1a62e7155a3f56fa3d534e9b8165ba3e" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 10:18:49 crc kubenswrapper[4838]: E1128 
10:18:49.873100 4838 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e922688f8823b2cc6bacf2a12c0bedea1a62e7155a3f56fa3d534e9b8165ba3e" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 10:18:49 crc kubenswrapper[4838]: E1128 10:18:49.873139 4838 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="4b719d62-400b-4141-8687-9fb7bf60e64a" containerName="nova-scheduler-scheduler" Nov 28 10:18:49 crc kubenswrapper[4838]: I1128 10:18:49.971947 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 10:18:49 crc kubenswrapper[4838]: W1128 10:18:49.984945 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d3e1aba_11d2_478c_9715_49ba175c7b03.slice/crio-4df689a155d6b881668ecd17d57b2de05d586644817d85486174f1119c0f4e9b WatchSource:0}: Error finding container 4df689a155d6b881668ecd17d57b2de05d586644817d85486174f1119c0f4e9b: Status 404 returned error can't find the container with id 4df689a155d6b881668ecd17d57b2de05d586644817d85486174f1119c0f4e9b Nov 28 10:18:50 crc kubenswrapper[4838]: I1128 10:18:50.599908 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="229a7c23-7909-4e77-bfa9-92d7d4f0f0eb" path="/var/lib/kubelet/pods/229a7c23-7909-4e77-bfa9-92d7d4f0f0eb/volumes" Nov 28 10:18:50 crc kubenswrapper[4838]: I1128 10:18:50.834454 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9d3e1aba-11d2-478c-9715-49ba175c7b03","Type":"ContainerStarted","Data":"04c8449faa0020ed9d98572bce2e1ec55f7a1ef6692ca46a9139c30b0cc1a8ac"} Nov 28 10:18:50 crc kubenswrapper[4838]: I1128 10:18:50.834989 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9d3e1aba-11d2-478c-9715-49ba175c7b03","Type":"ContainerStarted","Data":"4df689a155d6b881668ecd17d57b2de05d586644817d85486174f1119c0f4e9b"} Nov 28 10:18:50 crc kubenswrapper[4838]: I1128 10:18:50.835044 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 28 10:18:50 crc kubenswrapper[4838]: I1128 10:18:50.838756 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a57f595-4e5d-4ba1-80b9-f088c464d19b","Type":"ContainerDied","Data":"cdbde60a6c838ba6fb999d6f8f84bbd37709c136916740ee6799a3c32c048b6e"} Nov 28 10:18:50 crc kubenswrapper[4838]: I1128 10:18:50.838819 4838 generic.go:334] "Generic (PLEG): container finished" podID="7a57f595-4e5d-4ba1-80b9-f088c464d19b" containerID="cdbde60a6c838ba6fb999d6f8f84bbd37709c136916740ee6799a3c32c048b6e" exitCode=0 Nov 28 10:18:50 crc kubenswrapper[4838]: I1128 10:18:50.860421 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.359470974 podStartE2EDuration="1.860400064s" podCreationTimestamp="2025-11-28 10:18:49 +0000 UTC" firstStartedPulling="2025-11-28 10:18:49.989994609 +0000 UTC m=+1301.688968809" lastFinishedPulling="2025-11-28 10:18:50.490923719 +0000 UTC m=+1302.189897899" observedRunningTime="2025-11-28 10:18:50.851170178 +0000 UTC m=+1302.550144358" 
watchObservedRunningTime="2025-11-28 10:18:50.860400064 +0000 UTC m=+1302.559374244" Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.809411 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.814368 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.853921 4838 generic.go:334] "Generic (PLEG): container finished" podID="4b719d62-400b-4141-8687-9fb7bf60e64a" containerID="e922688f8823b2cc6bacf2a12c0bedea1a62e7155a3f56fa3d534e9b8165ba3e" exitCode=0 Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.853974 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4b719d62-400b-4141-8687-9fb7bf60e64a","Type":"ContainerDied","Data":"e922688f8823b2cc6bacf2a12c0bedea1a62e7155a3f56fa3d534e9b8165ba3e"} Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.853999 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4b719d62-400b-4141-8687-9fb7bf60e64a","Type":"ContainerDied","Data":"06d5e6d93074920aeb753ed71dc9d568420cc0683107527c23d982170adb4136"} Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.854017 4838 scope.go:117] "RemoveContainer" containerID="e922688f8823b2cc6bacf2a12c0bedea1a62e7155a3f56fa3d534e9b8165ba3e" Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.854136 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.857333 4838 generic.go:334] "Generic (PLEG): container finished" podID="1e67c380-458a-497c-9230-49b8875934fd" containerID="822d5243ce8b193c4fb551e5e472d329d41f24450f9d6f270a6eda16010197d6" exitCode=0 Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.858990 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.859138 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1e67c380-458a-497c-9230-49b8875934fd","Type":"ContainerDied","Data":"822d5243ce8b193c4fb551e5e472d329d41f24450f9d6f270a6eda16010197d6"} Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.859160 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1e67c380-458a-497c-9230-49b8875934fd","Type":"ContainerDied","Data":"9594f5c7ae6ee5f78a059c7c50e5e19906f8d835395ae5363fc54456b1bd7498"} Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.886595 4838 scope.go:117] "RemoveContainer" containerID="e922688f8823b2cc6bacf2a12c0bedea1a62e7155a3f56fa3d534e9b8165ba3e" Nov 28 10:18:51 crc kubenswrapper[4838]: E1128 10:18:51.887153 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e922688f8823b2cc6bacf2a12c0bedea1a62e7155a3f56fa3d534e9b8165ba3e\": container with ID starting with e922688f8823b2cc6bacf2a12c0bedea1a62e7155a3f56fa3d534e9b8165ba3e not found: ID does not exist" containerID="e922688f8823b2cc6bacf2a12c0bedea1a62e7155a3f56fa3d534e9b8165ba3e" Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.887217 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e922688f8823b2cc6bacf2a12c0bedea1a62e7155a3f56fa3d534e9b8165ba3e"} err="failed to get container status \"e922688f8823b2cc6bacf2a12c0bedea1a62e7155a3f56fa3d534e9b8165ba3e\": rpc error: code = NotFound desc = could not find container \"e922688f8823b2cc6bacf2a12c0bedea1a62e7155a3f56fa3d534e9b8165ba3e\": container with ID starting with e922688f8823b2cc6bacf2a12c0bedea1a62e7155a3f56fa3d534e9b8165ba3e not found: ID does not exist" Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.887262 4838 scope.go:117] "RemoveContainer" containerID="822d5243ce8b193c4fb551e5e472d329d41f24450f9d6f270a6eda16010197d6" Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.911653 4838 scope.go:117] "RemoveContainer" containerID="e020355e0736927bc17c9faa3ba0f126d89347c5f4ed51b81e584db36042fa25" Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.936617 4838 scope.go:117] "RemoveContainer" containerID="822d5243ce8b193c4fb551e5e472d329d41f24450f9d6f270a6eda16010197d6" Nov 28 10:18:51 crc kubenswrapper[4838]: E1128 10:18:51.937075 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"822d5243ce8b193c4fb551e5e472d329d41f24450f9d6f270a6eda16010197d6\": container with ID starting with 822d5243ce8b193c4fb551e5e472d329d41f24450f9d6f270a6eda16010197d6 not found: ID does not exist" containerID="822d5243ce8b193c4fb551e5e472d329d41f24450f9d6f270a6eda16010197d6" Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.937128 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"822d5243ce8b193c4fb551e5e472d329d41f24450f9d6f270a6eda16010197d6"} err="failed to get container status \"822d5243ce8b193c4fb551e5e472d329d41f24450f9d6f270a6eda16010197d6\": rpc error: code = NotFound desc = could not find container \"822d5243ce8b193c4fb551e5e472d329d41f24450f9d6f270a6eda16010197d6\": container with ID starting with 822d5243ce8b193c4fb551e5e472d329d41f24450f9d6f270a6eda16010197d6 not found: ID does not exist" Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.937165 4838 scope.go:117] 
"RemoveContainer" containerID="e020355e0736927bc17c9faa3ba0f126d89347c5f4ed51b81e584db36042fa25" Nov 28 10:18:51 crc kubenswrapper[4838]: E1128 10:18:51.937605 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e020355e0736927bc17c9faa3ba0f126d89347c5f4ed51b81e584db36042fa25\": container with ID starting with e020355e0736927bc17c9faa3ba0f126d89347c5f4ed51b81e584db36042fa25 not found: ID does not exist" containerID="e020355e0736927bc17c9faa3ba0f126d89347c5f4ed51b81e584db36042fa25" Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.937644 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e020355e0736927bc17c9faa3ba0f126d89347c5f4ed51b81e584db36042fa25"} err="failed to get container status \"e020355e0736927bc17c9faa3ba0f126d89347c5f4ed51b81e584db36042fa25\": rpc error: code = NotFound desc = could not find container \"e020355e0736927bc17c9faa3ba0f126d89347c5f4ed51b81e584db36042fa25\": container with ID starting with e020355e0736927bc17c9faa3ba0f126d89347c5f4ed51b81e584db36042fa25 not found: ID does not exist" Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.942359 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9mmgz\" (UniqueName: \"kubernetes.io/projected/4b719d62-400b-4141-8687-9fb7bf60e64a-kube-api-access-9mmgz\") pod \"4b719d62-400b-4141-8687-9fb7bf60e64a\" (UID: \"4b719d62-400b-4141-8687-9fb7bf60e64a\") " Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.942488 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b719d62-400b-4141-8687-9fb7bf60e64a-config-data\") pod \"4b719d62-400b-4141-8687-9fb7bf60e64a\" (UID: \"4b719d62-400b-4141-8687-9fb7bf60e64a\") " Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.942532 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e67c380-458a-497c-9230-49b8875934fd-logs\") pod \"1e67c380-458a-497c-9230-49b8875934fd\" (UID: \"1e67c380-458a-497c-9230-49b8875934fd\") " Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.942618 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e67c380-458a-497c-9230-49b8875934fd-config-data\") pod \"1e67c380-458a-497c-9230-49b8875934fd\" (UID: \"1e67c380-458a-497c-9230-49b8875934fd\") " Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.942675 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-th7wp\" (UniqueName: \"kubernetes.io/projected/1e67c380-458a-497c-9230-49b8875934fd-kube-api-access-th7wp\") pod \"1e67c380-458a-497c-9230-49b8875934fd\" (UID: \"1e67c380-458a-497c-9230-49b8875934fd\") " Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.942771 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e67c380-458a-497c-9230-49b8875934fd-combined-ca-bundle\") pod \"1e67c380-458a-497c-9230-49b8875934fd\" (UID: \"1e67c380-458a-497c-9230-49b8875934fd\") " Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.942859 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b719d62-400b-4141-8687-9fb7bf60e64a-combined-ca-bundle\") pod 
\"4b719d62-400b-4141-8687-9fb7bf60e64a\" (UID: \"4b719d62-400b-4141-8687-9fb7bf60e64a\") " Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.945484 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e67c380-458a-497c-9230-49b8875934fd-logs" (OuterVolumeSpecName: "logs") pod "1e67c380-458a-497c-9230-49b8875934fd" (UID: "1e67c380-458a-497c-9230-49b8875934fd"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.950071 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e67c380-458a-497c-9230-49b8875934fd-kube-api-access-th7wp" (OuterVolumeSpecName: "kube-api-access-th7wp") pod "1e67c380-458a-497c-9230-49b8875934fd" (UID: "1e67c380-458a-497c-9230-49b8875934fd"). InnerVolumeSpecName "kube-api-access-th7wp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.962863 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b719d62-400b-4141-8687-9fb7bf60e64a-kube-api-access-9mmgz" (OuterVolumeSpecName: "kube-api-access-9mmgz") pod "4b719d62-400b-4141-8687-9fb7bf60e64a" (UID: "4b719d62-400b-4141-8687-9fb7bf60e64a"). InnerVolumeSpecName "kube-api-access-9mmgz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.975879 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b719d62-400b-4141-8687-9fb7bf60e64a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4b719d62-400b-4141-8687-9fb7bf60e64a" (UID: "4b719d62-400b-4141-8687-9fb7bf60e64a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.976170 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e67c380-458a-497c-9230-49b8875934fd-config-data" (OuterVolumeSpecName: "config-data") pod "1e67c380-458a-497c-9230-49b8875934fd" (UID: "1e67c380-458a-497c-9230-49b8875934fd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.976892 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e67c380-458a-497c-9230-49b8875934fd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1e67c380-458a-497c-9230-49b8875934fd" (UID: "1e67c380-458a-497c-9230-49b8875934fd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:18:51 crc kubenswrapper[4838]: I1128 10:18:51.977761 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b719d62-400b-4141-8687-9fb7bf60e64a-config-data" (OuterVolumeSpecName: "config-data") pod "4b719d62-400b-4141-8687-9fb7bf60e64a" (UID: "4b719d62-400b-4141-8687-9fb7bf60e64a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.045111 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e67c380-458a-497c-9230-49b8875934fd-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.045142 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-th7wp\" (UniqueName: \"kubernetes.io/projected/1e67c380-458a-497c-9230-49b8875934fd-kube-api-access-th7wp\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.045155 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e67c380-458a-497c-9230-49b8875934fd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.045166 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b719d62-400b-4141-8687-9fb7bf60e64a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.045180 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9mmgz\" (UniqueName: \"kubernetes.io/projected/4b719d62-400b-4141-8687-9fb7bf60e64a-kube-api-access-9mmgz\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.045190 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b719d62-400b-4141-8687-9fb7bf60e64a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.045202 4838 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1e67c380-458a-497c-9230-49b8875934fd-logs\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.207772 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.216445 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.225347 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.233822 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.244964 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 10:18:52 crc kubenswrapper[4838]: E1128 10:18:52.245513 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b719d62-400b-4141-8687-9fb7bf60e64a" containerName="nova-scheduler-scheduler" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.245574 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b719d62-400b-4141-8687-9fb7bf60e64a" containerName="nova-scheduler-scheduler" Nov 28 10:18:52 crc kubenswrapper[4838]: E1128 10:18:52.245664 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e67c380-458a-497c-9230-49b8875934fd" containerName="nova-api-log" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.245747 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e67c380-458a-497c-9230-49b8875934fd" containerName="nova-api-log" Nov 28 10:18:52 crc kubenswrapper[4838]: E1128 10:18:52.245817 4838 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e67c380-458a-497c-9230-49b8875934fd" containerName="nova-api-api" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.245867 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e67c380-458a-497c-9230-49b8875934fd" containerName="nova-api-api" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.246071 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b719d62-400b-4141-8687-9fb7bf60e64a" containerName="nova-scheduler-scheduler" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.246134 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e67c380-458a-497c-9230-49b8875934fd" containerName="nova-api-api" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.246191 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e67c380-458a-497c-9230-49b8875934fd" containerName="nova-api-log" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.246795 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.250091 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.255069 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.256660 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.258551 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.289680 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.344930 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.350191 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d174409-ecf7-48fb-81bd-869ac3596342-config-data\") pod \"nova-scheduler-0\" (UID: \"6d174409-ecf7-48fb-81bd-869ac3596342\") " pod="openstack/nova-scheduler-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.350357 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fbljl\" (UniqueName: \"kubernetes.io/projected/6d174409-ecf7-48fb-81bd-869ac3596342-kube-api-access-fbljl\") pod \"nova-scheduler-0\" (UID: \"6d174409-ecf7-48fb-81bd-869ac3596342\") " pod="openstack/nova-scheduler-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.350393 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d174409-ecf7-48fb-81bd-869ac3596342-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6d174409-ecf7-48fb-81bd-869ac3596342\") " pod="openstack/nova-scheduler-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.452030 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gpv9p\" (UniqueName: \"kubernetes.io/projected/08804858-2bd5-4d72-9bf3-a9989128bed2-kube-api-access-gpv9p\") pod \"nova-api-0\" (UID: 
\"08804858-2bd5-4d72-9bf3-a9989128bed2\") " pod="openstack/nova-api-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.452459 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08804858-2bd5-4d72-9bf3-a9989128bed2-logs\") pod \"nova-api-0\" (UID: \"08804858-2bd5-4d72-9bf3-a9989128bed2\") " pod="openstack/nova-api-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.452675 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d174409-ecf7-48fb-81bd-869ac3596342-config-data\") pod \"nova-scheduler-0\" (UID: \"6d174409-ecf7-48fb-81bd-869ac3596342\") " pod="openstack/nova-scheduler-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.453027 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fbljl\" (UniqueName: \"kubernetes.io/projected/6d174409-ecf7-48fb-81bd-869ac3596342-kube-api-access-fbljl\") pod \"nova-scheduler-0\" (UID: \"6d174409-ecf7-48fb-81bd-869ac3596342\") " pod="openstack/nova-scheduler-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.453202 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d174409-ecf7-48fb-81bd-869ac3596342-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6d174409-ecf7-48fb-81bd-869ac3596342\") " pod="openstack/nova-scheduler-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.453356 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08804858-2bd5-4d72-9bf3-a9989128bed2-config-data\") pod \"nova-api-0\" (UID: \"08804858-2bd5-4d72-9bf3-a9989128bed2\") " pod="openstack/nova-api-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.453894 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08804858-2bd5-4d72-9bf3-a9989128bed2-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"08804858-2bd5-4d72-9bf3-a9989128bed2\") " pod="openstack/nova-api-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.457880 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d174409-ecf7-48fb-81bd-869ac3596342-config-data\") pod \"nova-scheduler-0\" (UID: \"6d174409-ecf7-48fb-81bd-869ac3596342\") " pod="openstack/nova-scheduler-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.459329 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d174409-ecf7-48fb-81bd-869ac3596342-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6d174409-ecf7-48fb-81bd-869ac3596342\") " pod="openstack/nova-scheduler-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.481187 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.481247 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.489153 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fbljl\" (UniqueName: 
\"kubernetes.io/projected/6d174409-ecf7-48fb-81bd-869ac3596342-kube-api-access-fbljl\") pod \"nova-scheduler-0\" (UID: \"6d174409-ecf7-48fb-81bd-869ac3596342\") " pod="openstack/nova-scheduler-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.556084 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gpv9p\" (UniqueName: \"kubernetes.io/projected/08804858-2bd5-4d72-9bf3-a9989128bed2-kube-api-access-gpv9p\") pod \"nova-api-0\" (UID: \"08804858-2bd5-4d72-9bf3-a9989128bed2\") " pod="openstack/nova-api-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.556175 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08804858-2bd5-4d72-9bf3-a9989128bed2-logs\") pod \"nova-api-0\" (UID: \"08804858-2bd5-4d72-9bf3-a9989128bed2\") " pod="openstack/nova-api-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.556351 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08804858-2bd5-4d72-9bf3-a9989128bed2-config-data\") pod \"nova-api-0\" (UID: \"08804858-2bd5-4d72-9bf3-a9989128bed2\") " pod="openstack/nova-api-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.556991 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08804858-2bd5-4d72-9bf3-a9989128bed2-logs\") pod \"nova-api-0\" (UID: \"08804858-2bd5-4d72-9bf3-a9989128bed2\") " pod="openstack/nova-api-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.557494 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08804858-2bd5-4d72-9bf3-a9989128bed2-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"08804858-2bd5-4d72-9bf3-a9989128bed2\") " pod="openstack/nova-api-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.561063 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08804858-2bd5-4d72-9bf3-a9989128bed2-config-data\") pod \"nova-api-0\" (UID: \"08804858-2bd5-4d72-9bf3-a9989128bed2\") " pod="openstack/nova-api-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.564316 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08804858-2bd5-4d72-9bf3-a9989128bed2-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"08804858-2bd5-4d72-9bf3-a9989128bed2\") " pod="openstack/nova-api-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.576644 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e67c380-458a-497c-9230-49b8875934fd" path="/var/lib/kubelet/pods/1e67c380-458a-497c-9230-49b8875934fd/volumes" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.578019 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b719d62-400b-4141-8687-9fb7bf60e64a" path="/var/lib/kubelet/pods/4b719d62-400b-4141-8687-9fb7bf60e64a/volumes" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.583019 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gpv9p\" (UniqueName: \"kubernetes.io/projected/08804858-2bd5-4d72-9bf3-a9989128bed2-kube-api-access-gpv9p\") pod \"nova-api-0\" (UID: \"08804858-2bd5-4d72-9bf3-a9989128bed2\") " pod="openstack/nova-api-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.595327 4838 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.608041 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 10:18:52 crc kubenswrapper[4838]: I1128 10:18:52.890475 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 10:18:52 crc kubenswrapper[4838]: W1128 10:18:52.895846 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d174409_ecf7_48fb_81bd_869ac3596342.slice/crio-bcaeaf22a59309084b2482048582dc65a06453aacde5cc792269733cb18b7c27 WatchSource:0}: Error finding container bcaeaf22a59309084b2482048582dc65a06453aacde5cc792269733cb18b7c27: Status 404 returned error can't find the container with id bcaeaf22a59309084b2482048582dc65a06453aacde5cc792269733cb18b7c27 Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.143667 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 10:18:53 crc kubenswrapper[4838]: W1128 10:18:53.149280 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod08804858_2bd5_4d72_9bf3_a9989128bed2.slice/crio-271bbe562545d4b3313dbbe00a89f00e3128c383ede0f3d232b02ea798e26b71 WatchSource:0}: Error finding container 271bbe562545d4b3313dbbe00a89f00e3128c383ede0f3d232b02ea798e26b71: Status 404 returned error can't find the container with id 271bbe562545d4b3313dbbe00a89f00e3128c383ede0f3d232b02ea798e26b71 Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.157601 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.765851 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.880001 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"08804858-2bd5-4d72-9bf3-a9989128bed2","Type":"ContainerStarted","Data":"c67ec7edbd8b25e8b0719e1488d6594bc0c6521159c17cf07b269110c74a7d07"} Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.881316 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"08804858-2bd5-4d72-9bf3-a9989128bed2","Type":"ContainerStarted","Data":"7fc2eda0c1d5eb5cc2e81e20acea01be60ea76f80d72047dc977eb460c1c3500"} Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.881337 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"08804858-2bd5-4d72-9bf3-a9989128bed2","Type":"ContainerStarted","Data":"271bbe562545d4b3313dbbe00a89f00e3128c383ede0f3d232b02ea798e26b71"} Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.885081 4838 generic.go:334] "Generic (PLEG): container finished" podID="7a57f595-4e5d-4ba1-80b9-f088c464d19b" containerID="aed9a7a101be420df7f969904281c5a8cd1c385df3f5786132979ad3958d38e1" exitCode=0 Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.885197 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.885287 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a57f595-4e5d-4ba1-80b9-f088c464d19b","Type":"ContainerDied","Data":"aed9a7a101be420df7f969904281c5a8cd1c385df3f5786132979ad3958d38e1"} Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.885358 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a57f595-4e5d-4ba1-80b9-f088c464d19b","Type":"ContainerDied","Data":"9ca1f8251e5e15f3c32053ddcd9fd0ef42e248c210e6a0467e9dd0a4d3c994df"} Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.885379 4838 scope.go:117] "RemoveContainer" containerID="8b0b0a562d211dc19cc048c9613d399453345ea08642ae5b2156129635eb26d4" Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.886214 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a57f595-4e5d-4ba1-80b9-f088c464d19b-run-httpd\") pod \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.886250 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-sg-core-conf-yaml\") pod \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.886321 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-config-data\") pod \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.886391 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-scripts\") pod \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.886458 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a57f595-4e5d-4ba1-80b9-f088c464d19b-log-httpd\") pod \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.886482 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-combined-ca-bundle\") pod \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.886518 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rmwm5\" (UniqueName: \"kubernetes.io/projected/7a57f595-4e5d-4ba1-80b9-f088c464d19b-kube-api-access-rmwm5\") pod \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\" (UID: \"7a57f595-4e5d-4ba1-80b9-f088c464d19b\") " Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.887786 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" 
event={"ID":"6d174409-ecf7-48fb-81bd-869ac3596342","Type":"ContainerStarted","Data":"13687a1678d10d30ff799a7895572da78b797230489a8ab4682e7c5b7db10266"} Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.887818 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6d174409-ecf7-48fb-81bd-869ac3596342","Type":"ContainerStarted","Data":"bcaeaf22a59309084b2482048582dc65a06453aacde5cc792269733cb18b7c27"} Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.888466 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a57f595-4e5d-4ba1-80b9-f088c464d19b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7a57f595-4e5d-4ba1-80b9-f088c464d19b" (UID: "7a57f595-4e5d-4ba1-80b9-f088c464d19b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.888848 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a57f595-4e5d-4ba1-80b9-f088c464d19b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7a57f595-4e5d-4ba1-80b9-f088c464d19b" (UID: "7a57f595-4e5d-4ba1-80b9-f088c464d19b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.892870 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a57f595-4e5d-4ba1-80b9-f088c464d19b-kube-api-access-rmwm5" (OuterVolumeSpecName: "kube-api-access-rmwm5") pod "7a57f595-4e5d-4ba1-80b9-f088c464d19b" (UID: "7a57f595-4e5d-4ba1-80b9-f088c464d19b"). InnerVolumeSpecName "kube-api-access-rmwm5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.895826 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-scripts" (OuterVolumeSpecName: "scripts") pod "7a57f595-4e5d-4ba1-80b9-f088c464d19b" (UID: "7a57f595-4e5d-4ba1-80b9-f088c464d19b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.907074 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=1.907056142 podStartE2EDuration="1.907056142s" podCreationTimestamp="2025-11-28 10:18:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:18:53.897356463 +0000 UTC m=+1305.596330643" watchObservedRunningTime="2025-11-28 10:18:53.907056142 +0000 UTC m=+1305.606030312" Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.933084 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7a57f595-4e5d-4ba1-80b9-f088c464d19b" (UID: "7a57f595-4e5d-4ba1-80b9-f088c464d19b"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.939683 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.939762 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.939796 4838 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.940515 4838 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1815c6b644c08c4a75da2a50900db223999a631363ba83a16dba3176b263bb61"} pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.940562 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" containerID="cri-o://1815c6b644c08c4a75da2a50900db223999a631363ba83a16dba3176b263bb61" gracePeriod=600 Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.942542 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.942524246 podStartE2EDuration="1.942524246s" podCreationTimestamp="2025-11-28 10:18:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:18:53.929890316 +0000 UTC m=+1305.628864486" watchObservedRunningTime="2025-11-28 10:18:53.942524246 +0000 UTC m=+1305.641498416" Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.988056 4838 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a57f595-4e5d-4ba1-80b9-f088c464d19b-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.988167 4838 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.988223 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.988353 4838 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a57f595-4e5d-4ba1-80b9-f088c464d19b-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.988410 4838 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-rmwm5\" (UniqueName: \"kubernetes.io/projected/7a57f595-4e5d-4ba1-80b9-f088c464d19b-kube-api-access-rmwm5\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.994211 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-config-data" (OuterVolumeSpecName: "config-data") pod "7a57f595-4e5d-4ba1-80b9-f088c464d19b" (UID: "7a57f595-4e5d-4ba1-80b9-f088c464d19b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:18:53 crc kubenswrapper[4838]: I1128 10:18:53.999072 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7a57f595-4e5d-4ba1-80b9-f088c464d19b" (UID: "7a57f595-4e5d-4ba1-80b9-f088c464d19b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.003906 4838 scope.go:117] "RemoveContainer" containerID="344dfd80495c7c8e55b04f0caab130aed567f1b14b83eab62798042e5d86b78f" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.024240 4838 scope.go:117] "RemoveContainer" containerID="aed9a7a101be420df7f969904281c5a8cd1c385df3f5786132979ad3958d38e1" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.049312 4838 scope.go:117] "RemoveContainer" containerID="cdbde60a6c838ba6fb999d6f8f84bbd37709c136916740ee6799a3c32c048b6e" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.069977 4838 scope.go:117] "RemoveContainer" containerID="8b0b0a562d211dc19cc048c9613d399453345ea08642ae5b2156129635eb26d4" Nov 28 10:18:54 crc kubenswrapper[4838]: E1128 10:18:54.070439 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b0b0a562d211dc19cc048c9613d399453345ea08642ae5b2156129635eb26d4\": container with ID starting with 8b0b0a562d211dc19cc048c9613d399453345ea08642ae5b2156129635eb26d4 not found: ID does not exist" containerID="8b0b0a562d211dc19cc048c9613d399453345ea08642ae5b2156129635eb26d4" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.070478 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b0b0a562d211dc19cc048c9613d399453345ea08642ae5b2156129635eb26d4"} err="failed to get container status \"8b0b0a562d211dc19cc048c9613d399453345ea08642ae5b2156129635eb26d4\": rpc error: code = NotFound desc = could not find container \"8b0b0a562d211dc19cc048c9613d399453345ea08642ae5b2156129635eb26d4\": container with ID starting with 8b0b0a562d211dc19cc048c9613d399453345ea08642ae5b2156129635eb26d4 not found: ID does not exist" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.070506 4838 scope.go:117] "RemoveContainer" containerID="344dfd80495c7c8e55b04f0caab130aed567f1b14b83eab62798042e5d86b78f" Nov 28 10:18:54 crc kubenswrapper[4838]: E1128 10:18:54.070880 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"344dfd80495c7c8e55b04f0caab130aed567f1b14b83eab62798042e5d86b78f\": container with ID starting with 344dfd80495c7c8e55b04f0caab130aed567f1b14b83eab62798042e5d86b78f not found: ID does not exist" containerID="344dfd80495c7c8e55b04f0caab130aed567f1b14b83eab62798042e5d86b78f" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.070930 4838 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"344dfd80495c7c8e55b04f0caab130aed567f1b14b83eab62798042e5d86b78f"} err="failed to get container status \"344dfd80495c7c8e55b04f0caab130aed567f1b14b83eab62798042e5d86b78f\": rpc error: code = NotFound desc = could not find container \"344dfd80495c7c8e55b04f0caab130aed567f1b14b83eab62798042e5d86b78f\": container with ID starting with 344dfd80495c7c8e55b04f0caab130aed567f1b14b83eab62798042e5d86b78f not found: ID does not exist" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.070974 4838 scope.go:117] "RemoveContainer" containerID="aed9a7a101be420df7f969904281c5a8cd1c385df3f5786132979ad3958d38e1" Nov 28 10:18:54 crc kubenswrapper[4838]: E1128 10:18:54.071383 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aed9a7a101be420df7f969904281c5a8cd1c385df3f5786132979ad3958d38e1\": container with ID starting with aed9a7a101be420df7f969904281c5a8cd1c385df3f5786132979ad3958d38e1 not found: ID does not exist" containerID="aed9a7a101be420df7f969904281c5a8cd1c385df3f5786132979ad3958d38e1" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.071449 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aed9a7a101be420df7f969904281c5a8cd1c385df3f5786132979ad3958d38e1"} err="failed to get container status \"aed9a7a101be420df7f969904281c5a8cd1c385df3f5786132979ad3958d38e1\": rpc error: code = NotFound desc = could not find container \"aed9a7a101be420df7f969904281c5a8cd1c385df3f5786132979ad3958d38e1\": container with ID starting with aed9a7a101be420df7f969904281c5a8cd1c385df3f5786132979ad3958d38e1 not found: ID does not exist" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.071497 4838 scope.go:117] "RemoveContainer" containerID="cdbde60a6c838ba6fb999d6f8f84bbd37709c136916740ee6799a3c32c048b6e" Nov 28 10:18:54 crc kubenswrapper[4838]: E1128 10:18:54.072398 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cdbde60a6c838ba6fb999d6f8f84bbd37709c136916740ee6799a3c32c048b6e\": container with ID starting with cdbde60a6c838ba6fb999d6f8f84bbd37709c136916740ee6799a3c32c048b6e not found: ID does not exist" containerID="cdbde60a6c838ba6fb999d6f8f84bbd37709c136916740ee6799a3c32c048b6e" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.072425 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdbde60a6c838ba6fb999d6f8f84bbd37709c136916740ee6799a3c32c048b6e"} err="failed to get container status \"cdbde60a6c838ba6fb999d6f8f84bbd37709c136916740ee6799a3c32c048b6e\": rpc error: code = NotFound desc = could not find container \"cdbde60a6c838ba6fb999d6f8f84bbd37709c136916740ee6799a3c32c048b6e\": container with ID starting with cdbde60a6c838ba6fb999d6f8f84bbd37709c136916740ee6799a3c32c048b6e not found: ID does not exist" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.090397 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.090431 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a57f595-4e5d-4ba1-80b9-f088c464d19b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 
10:18:54.259923 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.274490 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.285309 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:18:54 crc kubenswrapper[4838]: E1128 10:18:54.286426 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a57f595-4e5d-4ba1-80b9-f088c464d19b" containerName="ceilometer-notification-agent" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.286534 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a57f595-4e5d-4ba1-80b9-f088c464d19b" containerName="ceilometer-notification-agent" Nov 28 10:18:54 crc kubenswrapper[4838]: E1128 10:18:54.286639 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a57f595-4e5d-4ba1-80b9-f088c464d19b" containerName="proxy-httpd" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.286900 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a57f595-4e5d-4ba1-80b9-f088c464d19b" containerName="proxy-httpd" Nov 28 10:18:54 crc kubenswrapper[4838]: E1128 10:18:54.286996 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a57f595-4e5d-4ba1-80b9-f088c464d19b" containerName="ceilometer-central-agent" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.287081 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a57f595-4e5d-4ba1-80b9-f088c464d19b" containerName="ceilometer-central-agent" Nov 28 10:18:54 crc kubenswrapper[4838]: E1128 10:18:54.287158 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a57f595-4e5d-4ba1-80b9-f088c464d19b" containerName="sg-core" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.287247 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a57f595-4e5d-4ba1-80b9-f088c464d19b" containerName="sg-core" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.287579 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a57f595-4e5d-4ba1-80b9-f088c464d19b" containerName="proxy-httpd" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.287711 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a57f595-4e5d-4ba1-80b9-f088c464d19b" containerName="ceilometer-central-agent" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.287876 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a57f595-4e5d-4ba1-80b9-f088c464d19b" containerName="sg-core" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.287950 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a57f595-4e5d-4ba1-80b9-f088c464d19b" containerName="ceilometer-notification-agent" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.290259 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.295179 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.296230 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.296603 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.313853 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.395666 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c566861-fff7-471f-b998-638650e497c7-run-httpd\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.395713 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c566861-fff7-471f-b998-638650e497c7-log-httpd\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.395794 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-scripts\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.395816 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.395876 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.395938 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zzj66\" (UniqueName: \"kubernetes.io/projected/6c566861-fff7-471f-b998-638650e497c7-kube-api-access-zzj66\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.395963 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-config-data\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.396105 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.497455 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c566861-fff7-471f-b998-638650e497c7-run-httpd\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.497502 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c566861-fff7-471f-b998-638650e497c7-log-httpd\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.497564 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-scripts\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.497591 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.497632 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.497711 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zzj66\" (UniqueName: \"kubernetes.io/projected/6c566861-fff7-471f-b998-638650e497c7-kube-api-access-zzj66\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.497762 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-config-data\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.497789 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.498116 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c566861-fff7-471f-b998-638650e497c7-run-httpd\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.498658 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/6c566861-fff7-471f-b998-638650e497c7-log-httpd\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.503156 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-config-data\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.503364 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-scripts\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.504876 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.512508 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.512800 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.512950 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zzj66\" (UniqueName: \"kubernetes.io/projected/6c566861-fff7-471f-b998-638650e497c7-kube-api-access-zzj66\") pod \"ceilometer-0\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.573524 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a57f595-4e5d-4ba1-80b9-f088c464d19b" path="/var/lib/kubelet/pods/7a57f595-4e5d-4ba1-80b9-f088c464d19b/volumes" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.622971 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.898915 4838 generic.go:334] "Generic (PLEG): container finished" podID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerID="1815c6b644c08c4a75da2a50900db223999a631363ba83a16dba3176b263bb61" exitCode=0 Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.899082 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerDied","Data":"1815c6b644c08c4a75da2a50900db223999a631363ba83a16dba3176b263bb61"} Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.899308 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerStarted","Data":"37eb1fd382bf7ac855fc0bf19ecaa14a9b60925b9775992a84755c27a44467c5"} Nov 28 10:18:54 crc kubenswrapper[4838]: I1128 10:18:54.899333 4838 scope.go:117] "RemoveContainer" containerID="3ce7b45b9fd71f6cdec20d6a8542bb19cf78bbe5928e243b6058e07f9eb4cc79" Nov 28 10:18:55 crc kubenswrapper[4838]: W1128 10:18:55.113525 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c566861_fff7_471f_b998_638650e497c7.slice/crio-3f275de27d9f8be994f9fc28d46629cd0a0729cf63162e5541d88869a1c22668 WatchSource:0}: Error finding container 3f275de27d9f8be994f9fc28d46629cd0a0729cf63162e5541d88869a1c22668: Status 404 returned error can't find the container with id 3f275de27d9f8be994f9fc28d46629cd0a0729cf63162e5541d88869a1c22668 Nov 28 10:18:55 crc kubenswrapper[4838]: I1128 10:18:55.120129 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:18:55 crc kubenswrapper[4838]: I1128 10:18:55.930306 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c566861-fff7-471f-b998-638650e497c7","Type":"ContainerStarted","Data":"5e2f8a2b9bed7d2a647baef6fbda1c9af5a39cc97f3c54633cbb705e14ec5454"} Nov 28 10:18:55 crc kubenswrapper[4838]: I1128 10:18:55.930955 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c566861-fff7-471f-b998-638650e497c7","Type":"ContainerStarted","Data":"3f275de27d9f8be994f9fc28d46629cd0a0729cf63162e5541d88869a1c22668"} Nov 28 10:18:56 crc kubenswrapper[4838]: I1128 10:18:56.943223 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c566861-fff7-471f-b998-638650e497c7","Type":"ContainerStarted","Data":"a765c0b1796f21fae6de2cd439ca818f48fdb59edd054edc8a5b7764f60b2892"} Nov 28 10:18:57 crc kubenswrapper[4838]: I1128 10:18:57.482069 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 10:18:57 crc kubenswrapper[4838]: I1128 10:18:57.482413 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 10:18:57 crc kubenswrapper[4838]: I1128 10:18:57.596767 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 10:18:57 crc kubenswrapper[4838]: I1128 10:18:57.954411 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c566861-fff7-471f-b998-638650e497c7","Type":"ContainerStarted","Data":"f907c1ea6496e1551e5b27530c43487e8bad2fab6669a5e0f2973d6e2fb0d1d7"} 
Nov 28 10:18:58 crc kubenswrapper[4838]: I1128 10:18:58.508912 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="a7e0e2c6-e63f-40c8-8db2-e6c738371e1a" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.176:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 28 10:18:58 crc kubenswrapper[4838]: I1128 10:18:58.508937 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="a7e0e2c6-e63f-40c8-8db2-e6c738371e1a" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.176:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 28 10:18:59 crc kubenswrapper[4838]: I1128 10:18:59.505637 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Nov 28 10:18:59 crc kubenswrapper[4838]: I1128 10:18:59.986158 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c566861-fff7-471f-b998-638650e497c7","Type":"ContainerStarted","Data":"0a3612b4718800ce6ad294116b9951ad87a13d0cf88c87cedc890c79eda69a61"}
Nov 28 10:18:59 crc kubenswrapper[4838]: I1128 10:18:59.986538 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 28 10:19:00 crc kubenswrapper[4838]: I1128 10:19:00.018791 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.341610437 podStartE2EDuration="6.018760767s" podCreationTimestamp="2025-11-28 10:18:54 +0000 UTC" firstStartedPulling="2025-11-28 10:18:55.115172011 +0000 UTC m=+1306.814146181" lastFinishedPulling="2025-11-28 10:18:58.792322341 +0000 UTC m=+1310.491296511" observedRunningTime="2025-11-28 10:19:00.01345443 +0000 UTC m=+1311.712428670" watchObservedRunningTime="2025-11-28 10:19:00.018760767 +0000 UTC m=+1311.717734977"
Nov 28 10:19:02 crc kubenswrapper[4838]: I1128 10:19:02.597256 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 28 10:19:02 crc kubenswrapper[4838]: I1128 10:19:02.609518 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 28 10:19:02 crc kubenswrapper[4838]: I1128 10:19:02.609558 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 28 10:19:02 crc kubenswrapper[4838]: I1128 10:19:02.646147 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 28 10:19:03 crc kubenswrapper[4838]: I1128 10:19:03.062459 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 28 10:19:03 crc kubenswrapper[4838]: I1128 10:19:03.692011 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="08804858-2bd5-4d72-9bf3-a9989128bed2" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.180:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 28 10:19:03 crc kubenswrapper[4838]: I1128 10:19:03.692047 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="08804858-2bd5-4d72-9bf3-a9989128bed2" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.180:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 28 10:19:07 crc kubenswrapper[4838]: I1128 10:19:07.490239 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 28 10:19:07 crc kubenswrapper[4838]: I1128 10:19:07.490892 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 28 10:19:07 crc kubenswrapper[4838]: I1128 10:19:07.498380 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 28 10:19:07 crc kubenswrapper[4838]: I1128 10:19:07.502982 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.008580 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.083361 4838 generic.go:334] "Generic (PLEG): container finished" podID="44c808a6-3890-404e-8393-56982242d012" containerID="d30c169cc7a921e9480fae2d04340e41cef7b21788e7d827a7625c95699709f3" exitCode=137
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.083416 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"44c808a6-3890-404e-8393-56982242d012","Type":"ContainerDied","Data":"d30c169cc7a921e9480fae2d04340e41cef7b21788e7d827a7625c95699709f3"}
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.083453 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"44c808a6-3890-404e-8393-56982242d012","Type":"ContainerDied","Data":"09e9da87e0853b6acfb709e507461964cd4b6af7a4b9d8867c3ed1fae981ed50"}
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.083549 4838 scope.go:117] "RemoveContainer" containerID="d30c169cc7a921e9480fae2d04340e41cef7b21788e7d827a7625c95699709f3"
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.083564 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.112153 4838 scope.go:117] "RemoveContainer" containerID="d30c169cc7a921e9480fae2d04340e41cef7b21788e7d827a7625c95699709f3"
Nov 28 10:19:10 crc kubenswrapper[4838]: E1128 10:19:10.112749 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d30c169cc7a921e9480fae2d04340e41cef7b21788e7d827a7625c95699709f3\": container with ID starting with d30c169cc7a921e9480fae2d04340e41cef7b21788e7d827a7625c95699709f3 not found: ID does not exist" containerID="d30c169cc7a921e9480fae2d04340e41cef7b21788e7d827a7625c95699709f3"
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.112799 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d30c169cc7a921e9480fae2d04340e41cef7b21788e7d827a7625c95699709f3"} err="failed to get container status \"d30c169cc7a921e9480fae2d04340e41cef7b21788e7d827a7625c95699709f3\": rpc error: code = NotFound desc = could not find container \"d30c169cc7a921e9480fae2d04340e41cef7b21788e7d827a7625c95699709f3\": container with ID starting with d30c169cc7a921e9480fae2d04340e41cef7b21788e7d827a7625c95699709f3 not found: ID does not exist"
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.140786 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44c808a6-3890-404e-8393-56982242d012-combined-ca-bundle\") pod \"44c808a6-3890-404e-8393-56982242d012\" (UID: \"44c808a6-3890-404e-8393-56982242d012\") "
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.140993 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44c808a6-3890-404e-8393-56982242d012-config-data\") pod \"44c808a6-3890-404e-8393-56982242d012\" (UID: \"44c808a6-3890-404e-8393-56982242d012\") "
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.141231 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k4ll5\" (UniqueName: \"kubernetes.io/projected/44c808a6-3890-404e-8393-56982242d012-kube-api-access-k4ll5\") pod \"44c808a6-3890-404e-8393-56982242d012\" (UID: \"44c808a6-3890-404e-8393-56982242d012\") "
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.146430 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44c808a6-3890-404e-8393-56982242d012-kube-api-access-k4ll5" (OuterVolumeSpecName: "kube-api-access-k4ll5") pod "44c808a6-3890-404e-8393-56982242d012" (UID: "44c808a6-3890-404e-8393-56982242d012"). InnerVolumeSpecName "kube-api-access-k4ll5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.165944 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44c808a6-3890-404e-8393-56982242d012-config-data" (OuterVolumeSpecName: "config-data") pod "44c808a6-3890-404e-8393-56982242d012" (UID: "44c808a6-3890-404e-8393-56982242d012"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.167567 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44c808a6-3890-404e-8393-56982242d012-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "44c808a6-3890-404e-8393-56982242d012" (UID: "44c808a6-3890-404e-8393-56982242d012"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.243474 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k4ll5\" (UniqueName: \"kubernetes.io/projected/44c808a6-3890-404e-8393-56982242d012-kube-api-access-k4ll5\") on node \"crc\" DevicePath \"\""
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.243524 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44c808a6-3890-404e-8393-56982242d012-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.243538 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44c808a6-3890-404e-8393-56982242d012-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.449796 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.464665 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.474275 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 10:19:10 crc kubenswrapper[4838]: E1128 10:19:10.474753 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44c808a6-3890-404e-8393-56982242d012" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.474770 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="44c808a6-3890-404e-8393-56982242d012" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.474981 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="44c808a6-3890-404e-8393-56982242d012" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.475632 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.478099 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc"
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.478555 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt"
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.483638 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.484611 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.551019 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71d3fada-848c-4e73-ad9e-f63e8fdde48e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"71d3fada-848c-4e73-ad9e-f63e8fdde48e\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.551169 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/71d3fada-848c-4e73-ad9e-f63e8fdde48e-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"71d3fada-848c-4e73-ad9e-f63e8fdde48e\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.551243 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/71d3fada-848c-4e73-ad9e-f63e8fdde48e-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"71d3fada-848c-4e73-ad9e-f63e8fdde48e\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.551284 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrbpn\" (UniqueName: \"kubernetes.io/projected/71d3fada-848c-4e73-ad9e-f63e8fdde48e-kube-api-access-nrbpn\") pod \"nova-cell1-novncproxy-0\" (UID: \"71d3fada-848c-4e73-ad9e-f63e8fdde48e\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.551344 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71d3fada-848c-4e73-ad9e-f63e8fdde48e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"71d3fada-848c-4e73-ad9e-f63e8fdde48e\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.575096 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44c808a6-3890-404e-8393-56982242d012" path="/var/lib/kubelet/pods/44c808a6-3890-404e-8393-56982242d012/volumes"
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.652843 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/71d3fada-848c-4e73-ad9e-f63e8fdde48e-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"71d3fada-848c-4e73-ad9e-f63e8fdde48e\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.652991 4838 reconciler_common.go:218]
"operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/71d3fada-848c-4e73-ad9e-f63e8fdde48e-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"71d3fada-848c-4e73-ad9e-f63e8fdde48e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.653064 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrbpn\" (UniqueName: \"kubernetes.io/projected/71d3fada-848c-4e73-ad9e-f63e8fdde48e-kube-api-access-nrbpn\") pod \"nova-cell1-novncproxy-0\" (UID: \"71d3fada-848c-4e73-ad9e-f63e8fdde48e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.653148 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71d3fada-848c-4e73-ad9e-f63e8fdde48e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"71d3fada-848c-4e73-ad9e-f63e8fdde48e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.653234 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71d3fada-848c-4e73-ad9e-f63e8fdde48e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"71d3fada-848c-4e73-ad9e-f63e8fdde48e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.659857 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/71d3fada-848c-4e73-ad9e-f63e8fdde48e-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"71d3fada-848c-4e73-ad9e-f63e8fdde48e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.660037 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71d3fada-848c-4e73-ad9e-f63e8fdde48e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"71d3fada-848c-4e73-ad9e-f63e8fdde48e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.663785 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/71d3fada-848c-4e73-ad9e-f63e8fdde48e-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"71d3fada-848c-4e73-ad9e-f63e8fdde48e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.668800 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71d3fada-848c-4e73-ad9e-f63e8fdde48e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"71d3fada-848c-4e73-ad9e-f63e8fdde48e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.680333 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrbpn\" (UniqueName: \"kubernetes.io/projected/71d3fada-848c-4e73-ad9e-f63e8fdde48e-kube-api-access-nrbpn\") pod \"nova-cell1-novncproxy-0\" (UID: \"71d3fada-848c-4e73-ad9e-f63e8fdde48e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:19:10 crc kubenswrapper[4838]: I1128 10:19:10.791227 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:19:11 crc kubenswrapper[4838]: I1128 10:19:11.315445 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 10:19:11 crc kubenswrapper[4838]: W1128 10:19:11.316610 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod71d3fada_848c_4e73_ad9e_f63e8fdde48e.slice/crio-cad0744f682e6b46db2f75a11f2374b7738e54c74c4e07cc0845e4980c6e6b61 WatchSource:0}: Error finding container cad0744f682e6b46db2f75a11f2374b7738e54c74c4e07cc0845e4980c6e6b61: Status 404 returned error can't find the container with id cad0744f682e6b46db2f75a11f2374b7738e54c74c4e07cc0845e4980c6e6b61 Nov 28 10:19:12 crc kubenswrapper[4838]: I1128 10:19:12.112271 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"71d3fada-848c-4e73-ad9e-f63e8fdde48e","Type":"ContainerStarted","Data":"ffb5476692b971c07615eea1d1699b0e53adc3f05b43ac36ddda2ad25bf84ad9"} Nov 28 10:19:12 crc kubenswrapper[4838]: I1128 10:19:12.112633 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"71d3fada-848c-4e73-ad9e-f63e8fdde48e","Type":"ContainerStarted","Data":"cad0744f682e6b46db2f75a11f2374b7738e54c74c4e07cc0845e4980c6e6b61"} Nov 28 10:19:12 crc kubenswrapper[4838]: I1128 10:19:12.134635 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.1346092580000002 podStartE2EDuration="2.134609258s" podCreationTimestamp="2025-11-28 10:19:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:19:12.133315502 +0000 UTC m=+1323.832289712" watchObservedRunningTime="2025-11-28 10:19:12.134609258 +0000 UTC m=+1323.833583458" Nov 28 10:19:12 crc kubenswrapper[4838]: I1128 10:19:12.615159 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 10:19:12 crc kubenswrapper[4838]: I1128 10:19:12.616642 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 10:19:12 crc kubenswrapper[4838]: I1128 10:19:12.616969 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 10:19:12 crc kubenswrapper[4838]: I1128 10:19:12.618942 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 10:19:13 crc kubenswrapper[4838]: I1128 10:19:13.123090 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 10:19:13 crc kubenswrapper[4838]: I1128 10:19:13.126550 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 10:19:13 crc kubenswrapper[4838]: I1128 10:19:13.323754 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-mjgmt"] Nov 28 10:19:13 crc kubenswrapper[4838]: I1128 10:19:13.325546 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" Nov 28 10:19:13 crc kubenswrapper[4838]: I1128 10:19:13.334725 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-mjgmt"] Nov 28 10:19:13 crc kubenswrapper[4838]: I1128 10:19:13.409737 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-ovsdbserver-nb\") pod \"dnsmasq-dns-5b856c5697-mjgmt\" (UID: \"d861e633-58c3-4190-b4bf-c113fe415368\") " pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" Nov 28 10:19:13 crc kubenswrapper[4838]: I1128 10:19:13.409817 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-config\") pod \"dnsmasq-dns-5b856c5697-mjgmt\" (UID: \"d861e633-58c3-4190-b4bf-c113fe415368\") " pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" Nov 28 10:19:13 crc kubenswrapper[4838]: I1128 10:19:13.409872 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-dns-svc\") pod \"dnsmasq-dns-5b856c5697-mjgmt\" (UID: \"d861e633-58c3-4190-b4bf-c113fe415368\") " pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" Nov 28 10:19:13 crc kubenswrapper[4838]: I1128 10:19:13.409905 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-ovsdbserver-sb\") pod \"dnsmasq-dns-5b856c5697-mjgmt\" (UID: \"d861e633-58c3-4190-b4bf-c113fe415368\") " pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" Nov 28 10:19:13 crc kubenswrapper[4838]: I1128 10:19:13.409931 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfm6l\" (UniqueName: \"kubernetes.io/projected/d861e633-58c3-4190-b4bf-c113fe415368-kube-api-access-pfm6l\") pod \"dnsmasq-dns-5b856c5697-mjgmt\" (UID: \"d861e633-58c3-4190-b4bf-c113fe415368\") " pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" Nov 28 10:19:13 crc kubenswrapper[4838]: I1128 10:19:13.512555 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-ovsdbserver-nb\") pod \"dnsmasq-dns-5b856c5697-mjgmt\" (UID: \"d861e633-58c3-4190-b4bf-c113fe415368\") " pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" Nov 28 10:19:13 crc kubenswrapper[4838]: I1128 10:19:13.512613 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-config\") pod \"dnsmasq-dns-5b856c5697-mjgmt\" (UID: \"d861e633-58c3-4190-b4bf-c113fe415368\") " pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" Nov 28 10:19:13 crc kubenswrapper[4838]: I1128 10:19:13.512642 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-dns-svc\") pod \"dnsmasq-dns-5b856c5697-mjgmt\" (UID: \"d861e633-58c3-4190-b4bf-c113fe415368\") " pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" Nov 28 10:19:13 crc kubenswrapper[4838]: I1128 10:19:13.512658 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-ovsdbserver-sb\") pod \"dnsmasq-dns-5b856c5697-mjgmt\" (UID: \"d861e633-58c3-4190-b4bf-c113fe415368\") " pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" Nov 28 10:19:13 crc kubenswrapper[4838]: I1128 10:19:13.512681 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfm6l\" (UniqueName: \"kubernetes.io/projected/d861e633-58c3-4190-b4bf-c113fe415368-kube-api-access-pfm6l\") pod \"dnsmasq-dns-5b856c5697-mjgmt\" (UID: \"d861e633-58c3-4190-b4bf-c113fe415368\") " pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" Nov 28 10:19:13 crc kubenswrapper[4838]: I1128 10:19:13.513679 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-ovsdbserver-nb\") pod \"dnsmasq-dns-5b856c5697-mjgmt\" (UID: \"d861e633-58c3-4190-b4bf-c113fe415368\") " pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" Nov 28 10:19:13 crc kubenswrapper[4838]: I1128 10:19:13.513703 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-dns-svc\") pod \"dnsmasq-dns-5b856c5697-mjgmt\" (UID: \"d861e633-58c3-4190-b4bf-c113fe415368\") " pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" Nov 28 10:19:13 crc kubenswrapper[4838]: I1128 10:19:13.513863 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-config\") pod \"dnsmasq-dns-5b856c5697-mjgmt\" (UID: \"d861e633-58c3-4190-b4bf-c113fe415368\") " pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" Nov 28 10:19:13 crc kubenswrapper[4838]: I1128 10:19:13.513898 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-ovsdbserver-sb\") pod \"dnsmasq-dns-5b856c5697-mjgmt\" (UID: \"d861e633-58c3-4190-b4bf-c113fe415368\") " pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" Nov 28 10:19:13 crc kubenswrapper[4838]: I1128 10:19:13.536452 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfm6l\" (UniqueName: \"kubernetes.io/projected/d861e633-58c3-4190-b4bf-c113fe415368-kube-api-access-pfm6l\") pod \"dnsmasq-dns-5b856c5697-mjgmt\" (UID: \"d861e633-58c3-4190-b4bf-c113fe415368\") " pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" Nov 28 10:19:13 crc kubenswrapper[4838]: I1128 10:19:13.683358 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" Nov 28 10:19:14 crc kubenswrapper[4838]: I1128 10:19:14.179975 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-mjgmt"] Nov 28 10:19:15 crc kubenswrapper[4838]: I1128 10:19:15.150918 4838 generic.go:334] "Generic (PLEG): container finished" podID="d861e633-58c3-4190-b4bf-c113fe415368" containerID="856d7fb9a5ac41586e57db7f605cd0bfdc2f48f6c0c5e7fa6b0efd53ff6c9a5c" exitCode=0 Nov 28 10:19:15 crc kubenswrapper[4838]: I1128 10:19:15.151008 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" event={"ID":"d861e633-58c3-4190-b4bf-c113fe415368","Type":"ContainerDied","Data":"856d7fb9a5ac41586e57db7f605cd0bfdc2f48f6c0c5e7fa6b0efd53ff6c9a5c"} Nov 28 10:19:15 crc kubenswrapper[4838]: I1128 10:19:15.151418 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" event={"ID":"d861e633-58c3-4190-b4bf-c113fe415368","Type":"ContainerStarted","Data":"e0a84c13b189d18971b400bda7cd3d54d2e7b411085a22568684fea3e9aef536"} Nov 28 10:19:15 crc kubenswrapper[4838]: I1128 10:19:15.513869 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:19:15 crc kubenswrapper[4838]: I1128 10:19:15.514359 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6c566861-fff7-471f-b998-638650e497c7" containerName="ceilometer-central-agent" containerID="cri-o://5e2f8a2b9bed7d2a647baef6fbda1c9af5a39cc97f3c54633cbb705e14ec5454" gracePeriod=30 Nov 28 10:19:15 crc kubenswrapper[4838]: I1128 10:19:15.514482 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6c566861-fff7-471f-b998-638650e497c7" containerName="proxy-httpd" containerID="cri-o://0a3612b4718800ce6ad294116b9951ad87a13d0cf88c87cedc890c79eda69a61" gracePeriod=30 Nov 28 10:19:15 crc kubenswrapper[4838]: I1128 10:19:15.514632 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6c566861-fff7-471f-b998-638650e497c7" containerName="sg-core" containerID="cri-o://f907c1ea6496e1551e5b27530c43487e8bad2fab6669a5e0f2973d6e2fb0d1d7" gracePeriod=30 Nov 28 10:19:15 crc kubenswrapper[4838]: I1128 10:19:15.514877 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6c566861-fff7-471f-b998-638650e497c7" containerName="ceilometer-notification-agent" containerID="cri-o://a765c0b1796f21fae6de2cd439ca818f48fdb59edd054edc8a5b7764f60b2892" gracePeriod=30 Nov 28 10:19:15 crc kubenswrapper[4838]: I1128 10:19:15.521183 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="6c566861-fff7-471f-b998-638650e497c7" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.181:3000/\": EOF" Nov 28 10:19:15 crc kubenswrapper[4838]: I1128 10:19:15.756477 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 10:19:15 crc kubenswrapper[4838]: I1128 10:19:15.791675 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:19:16 crc kubenswrapper[4838]: I1128 10:19:16.182355 4838 generic.go:334] "Generic (PLEG): container finished" podID="6c566861-fff7-471f-b998-638650e497c7" containerID="0a3612b4718800ce6ad294116b9951ad87a13d0cf88c87cedc890c79eda69a61" exitCode=0 Nov 28 10:19:16 crc 
kubenswrapper[4838]: I1128 10:19:16.182396 4838 generic.go:334] "Generic (PLEG): container finished" podID="6c566861-fff7-471f-b998-638650e497c7" containerID="f907c1ea6496e1551e5b27530c43487e8bad2fab6669a5e0f2973d6e2fb0d1d7" exitCode=2 Nov 28 10:19:16 crc kubenswrapper[4838]: I1128 10:19:16.182406 4838 generic.go:334] "Generic (PLEG): container finished" podID="6c566861-fff7-471f-b998-638650e497c7" containerID="5e2f8a2b9bed7d2a647baef6fbda1c9af5a39cc97f3c54633cbb705e14ec5454" exitCode=0 Nov 28 10:19:16 crc kubenswrapper[4838]: I1128 10:19:16.182433 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c566861-fff7-471f-b998-638650e497c7","Type":"ContainerDied","Data":"0a3612b4718800ce6ad294116b9951ad87a13d0cf88c87cedc890c79eda69a61"} Nov 28 10:19:16 crc kubenswrapper[4838]: I1128 10:19:16.182498 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c566861-fff7-471f-b998-638650e497c7","Type":"ContainerDied","Data":"f907c1ea6496e1551e5b27530c43487e8bad2fab6669a5e0f2973d6e2fb0d1d7"} Nov 28 10:19:16 crc kubenswrapper[4838]: I1128 10:19:16.182513 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c566861-fff7-471f-b998-638650e497c7","Type":"ContainerDied","Data":"5e2f8a2b9bed7d2a647baef6fbda1c9af5a39cc97f3c54633cbb705e14ec5454"} Nov 28 10:19:16 crc kubenswrapper[4838]: I1128 10:19:16.185351 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" event={"ID":"d861e633-58c3-4190-b4bf-c113fe415368","Type":"ContainerStarted","Data":"1dc6b5d8ba0342ebc40a6054a39bb36a6a8085273a805c2e3e99f6f4d39ace9a"} Nov 28 10:19:16 crc kubenswrapper[4838]: I1128 10:19:16.185454 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="08804858-2bd5-4d72-9bf3-a9989128bed2" containerName="nova-api-log" containerID="cri-o://7fc2eda0c1d5eb5cc2e81e20acea01be60ea76f80d72047dc977eb460c1c3500" gracePeriod=30 Nov 28 10:19:16 crc kubenswrapper[4838]: I1128 10:19:16.185528 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="08804858-2bd5-4d72-9bf3-a9989128bed2" containerName="nova-api-api" containerID="cri-o://c67ec7edbd8b25e8b0719e1488d6594bc0c6521159c17cf07b269110c74a7d07" gracePeriod=30 Nov 28 10:19:16 crc kubenswrapper[4838]: I1128 10:19:16.185928 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" Nov 28 10:19:16 crc kubenswrapper[4838]: I1128 10:19:16.220921 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" podStartSLOduration=3.220904754 podStartE2EDuration="3.220904754s" podCreationTimestamp="2025-11-28 10:19:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:19:16.214179017 +0000 UTC m=+1327.913153177" watchObservedRunningTime="2025-11-28 10:19:16.220904754 +0000 UTC m=+1327.919878924" Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.195956 4838 generic.go:334] "Generic (PLEG): container finished" podID="08804858-2bd5-4d72-9bf3-a9989128bed2" containerID="7fc2eda0c1d5eb5cc2e81e20acea01be60ea76f80d72047dc977eb460c1c3500" exitCode=143 Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.196041 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
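
The ceilometer-0 and nova-api-0 teardowns above show the standard stop sequence: the kubelet asks the runtime to stop each container with gracePeriod=30, the readiness probe starts failing at once (the EOF from proxy-httpd is expected as soon as the listener closes), and containers then exit cleanly (exitCode=0), via SIGTERM (143, as nova-api-log does here), or via SIGKILL if the grace period lapses (137, as happened to the novncproxy container earlier). A hedged Go sketch of that TERM -> wait -> KILL pattern, assuming a plain *os.Process rather than a CRI runtime handle:

    package main

    import (
        "os"
        "os/exec"
        "syscall"
        "time"
    )

    // stopWithGrace sends SIGTERM, waits up to the grace period,
    // then falls back to SIGKILL. A process with no TERM handler
    // is reported as 143; one killed after the deadline as 137.
    func stopWithGrace(p *os.Process, grace time.Duration) {
        _ = p.Signal(syscall.SIGTERM)
        done := make(chan struct{})
        go func() { _, _ = p.Wait(); close(done) }()
        select {
        case <-done:
        case <-time.After(grace):
            _ = p.Kill() // SIGKILL
        }
    }

    func main() {
        cmd := exec.Command("sleep", "300")
        _ = cmd.Start()
        stopWithGrace(cmd.Process, 30*time.Second)
    }
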
event={"ID":"08804858-2bd5-4d72-9bf3-a9989128bed2","Type":"ContainerDied","Data":"7fc2eda0c1d5eb5cc2e81e20acea01be60ea76f80d72047dc977eb460c1c3500"} Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.631544 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.776213 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-combined-ca-bundle\") pod \"6c566861-fff7-471f-b998-638650e497c7\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.776316 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-scripts\") pod \"6c566861-fff7-471f-b998-638650e497c7\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.776350 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zzj66\" (UniqueName: \"kubernetes.io/projected/6c566861-fff7-471f-b998-638650e497c7-kube-api-access-zzj66\") pod \"6c566861-fff7-471f-b998-638650e497c7\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.776432 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-config-data\") pod \"6c566861-fff7-471f-b998-638650e497c7\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.776482 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c566861-fff7-471f-b998-638650e497c7-log-httpd\") pod \"6c566861-fff7-471f-b998-638650e497c7\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.776514 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-ceilometer-tls-certs\") pod \"6c566861-fff7-471f-b998-638650e497c7\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.776595 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-sg-core-conf-yaml\") pod \"6c566861-fff7-471f-b998-638650e497c7\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.776628 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c566861-fff7-471f-b998-638650e497c7-run-httpd\") pod \"6c566861-fff7-471f-b998-638650e497c7\" (UID: \"6c566861-fff7-471f-b998-638650e497c7\") " Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.777367 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c566861-fff7-471f-b998-638650e497c7-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6c566861-fff7-471f-b998-638650e497c7" (UID: "6c566861-fff7-471f-b998-638650e497c7"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.777617 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c566861-fff7-471f-b998-638650e497c7-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "6c566861-fff7-471f-b998-638650e497c7" (UID: "6c566861-fff7-471f-b998-638650e497c7"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.782058 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c566861-fff7-471f-b998-638650e497c7-kube-api-access-zzj66" (OuterVolumeSpecName: "kube-api-access-zzj66") pod "6c566861-fff7-471f-b998-638650e497c7" (UID: "6c566861-fff7-471f-b998-638650e497c7"). InnerVolumeSpecName "kube-api-access-zzj66". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.784179 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-scripts" (OuterVolumeSpecName: "scripts") pod "6c566861-fff7-471f-b998-638650e497c7" (UID: "6c566861-fff7-471f-b998-638650e497c7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.849858 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6c566861-fff7-471f-b998-638650e497c7" (UID: "6c566861-fff7-471f-b998-638650e497c7"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.866193 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "6c566861-fff7-471f-b998-638650e497c7" (UID: "6c566861-fff7-471f-b998-638650e497c7"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.878537 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.878568 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zzj66\" (UniqueName: \"kubernetes.io/projected/6c566861-fff7-471f-b998-638650e497c7-kube-api-access-zzj66\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.878581 4838 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c566861-fff7-471f-b998-638650e497c7-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.878590 4838 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.878599 4838 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.878606 4838 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c566861-fff7-471f-b998-638650e497c7-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.905188 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c566861-fff7-471f-b998-638650e497c7" (UID: "6c566861-fff7-471f-b998-638650e497c7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.909966 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-config-data" (OuterVolumeSpecName: "config-data") pod "6c566861-fff7-471f-b998-638650e497c7" (UID: "6c566861-fff7-471f-b998-638650e497c7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.980023 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:17 crc kubenswrapper[4838]: I1128 10:19:17.980055 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c566861-fff7-471f-b998-638650e497c7-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.210956 4838 generic.go:334] "Generic (PLEG): container finished" podID="6c566861-fff7-471f-b998-638650e497c7" containerID="a765c0b1796f21fae6de2cd439ca818f48fdb59edd054edc8a5b7764f60b2892" exitCode=0 Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.211161 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c566861-fff7-471f-b998-638650e497c7","Type":"ContainerDied","Data":"a765c0b1796f21fae6de2cd439ca818f48fdb59edd054edc8a5b7764f60b2892"} Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.211903 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c566861-fff7-471f-b998-638650e497c7","Type":"ContainerDied","Data":"3f275de27d9f8be994f9fc28d46629cd0a0729cf63162e5541d88869a1c22668"} Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.211953 4838 scope.go:117] "RemoveContainer" containerID="0a3612b4718800ce6ad294116b9951ad87a13d0cf88c87cedc890c79eda69a61" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.211263 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.247157 4838 scope.go:117] "RemoveContainer" containerID="f907c1ea6496e1551e5b27530c43487e8bad2fab6669a5e0f2973d6e2fb0d1d7" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.277830 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.292193 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.308708 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:19:18 crc kubenswrapper[4838]: E1128 10:19:18.309270 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c566861-fff7-471f-b998-638650e497c7" containerName="ceilometer-central-agent" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.309291 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c566861-fff7-471f-b998-638650e497c7" containerName="ceilometer-central-agent" Nov 28 10:19:18 crc kubenswrapper[4838]: E1128 10:19:18.309322 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c566861-fff7-471f-b998-638650e497c7" containerName="proxy-httpd" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.309330 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c566861-fff7-471f-b998-638650e497c7" containerName="proxy-httpd" Nov 28 10:19:18 crc kubenswrapper[4838]: E1128 10:19:18.309352 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c566861-fff7-471f-b998-638650e497c7" containerName="sg-core" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.309359 4838 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="6c566861-fff7-471f-b998-638650e497c7" containerName="sg-core" Nov 28 10:19:18 crc kubenswrapper[4838]: E1128 10:19:18.309374 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c566861-fff7-471f-b998-638650e497c7" containerName="ceilometer-notification-agent" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.309382 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c566861-fff7-471f-b998-638650e497c7" containerName="ceilometer-notification-agent" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.309605 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c566861-fff7-471f-b998-638650e497c7" containerName="proxy-httpd" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.309626 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c566861-fff7-471f-b998-638650e497c7" containerName="ceilometer-notification-agent" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.309643 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c566861-fff7-471f-b998-638650e497c7" containerName="sg-core" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.309664 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c566861-fff7-471f-b998-638650e497c7" containerName="ceilometer-central-agent" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.311671 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.313936 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.314192 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.314503 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.327441 4838 scope.go:117] "RemoveContainer" containerID="a765c0b1796f21fae6de2cd439ca818f48fdb59edd054edc8a5b7764f60b2892" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.331068 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.356433 4838 scope.go:117] "RemoveContainer" containerID="5e2f8a2b9bed7d2a647baef6fbda1c9af5a39cc97f3c54633cbb705e14ec5454" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.378011 4838 scope.go:117] "RemoveContainer" containerID="0a3612b4718800ce6ad294116b9951ad87a13d0cf88c87cedc890c79eda69a61" Nov 28 10:19:18 crc kubenswrapper[4838]: E1128 10:19:18.378384 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a3612b4718800ce6ad294116b9951ad87a13d0cf88c87cedc890c79eda69a61\": container with ID starting with 0a3612b4718800ce6ad294116b9951ad87a13d0cf88c87cedc890c79eda69a61 not found: ID does not exist" containerID="0a3612b4718800ce6ad294116b9951ad87a13d0cf88c87cedc890c79eda69a61" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.378430 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a3612b4718800ce6ad294116b9951ad87a13d0cf88c87cedc890c79eda69a61"} err="failed to get container status \"0a3612b4718800ce6ad294116b9951ad87a13d0cf88c87cedc890c79eda69a61\": rpc error: code = NotFound desc = could not find container 
\"0a3612b4718800ce6ad294116b9951ad87a13d0cf88c87cedc890c79eda69a61\": container with ID starting with 0a3612b4718800ce6ad294116b9951ad87a13d0cf88c87cedc890c79eda69a61 not found: ID does not exist" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.378462 4838 scope.go:117] "RemoveContainer" containerID="f907c1ea6496e1551e5b27530c43487e8bad2fab6669a5e0f2973d6e2fb0d1d7" Nov 28 10:19:18 crc kubenswrapper[4838]: E1128 10:19:18.378834 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f907c1ea6496e1551e5b27530c43487e8bad2fab6669a5e0f2973d6e2fb0d1d7\": container with ID starting with f907c1ea6496e1551e5b27530c43487e8bad2fab6669a5e0f2973d6e2fb0d1d7 not found: ID does not exist" containerID="f907c1ea6496e1551e5b27530c43487e8bad2fab6669a5e0f2973d6e2fb0d1d7" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.378861 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f907c1ea6496e1551e5b27530c43487e8bad2fab6669a5e0f2973d6e2fb0d1d7"} err="failed to get container status \"f907c1ea6496e1551e5b27530c43487e8bad2fab6669a5e0f2973d6e2fb0d1d7\": rpc error: code = NotFound desc = could not find container \"f907c1ea6496e1551e5b27530c43487e8bad2fab6669a5e0f2973d6e2fb0d1d7\": container with ID starting with f907c1ea6496e1551e5b27530c43487e8bad2fab6669a5e0f2973d6e2fb0d1d7 not found: ID does not exist" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.378877 4838 scope.go:117] "RemoveContainer" containerID="a765c0b1796f21fae6de2cd439ca818f48fdb59edd054edc8a5b7764f60b2892" Nov 28 10:19:18 crc kubenswrapper[4838]: E1128 10:19:18.379322 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a765c0b1796f21fae6de2cd439ca818f48fdb59edd054edc8a5b7764f60b2892\": container with ID starting with a765c0b1796f21fae6de2cd439ca818f48fdb59edd054edc8a5b7764f60b2892 not found: ID does not exist" containerID="a765c0b1796f21fae6de2cd439ca818f48fdb59edd054edc8a5b7764f60b2892" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.379349 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a765c0b1796f21fae6de2cd439ca818f48fdb59edd054edc8a5b7764f60b2892"} err="failed to get container status \"a765c0b1796f21fae6de2cd439ca818f48fdb59edd054edc8a5b7764f60b2892\": rpc error: code = NotFound desc = could not find container \"a765c0b1796f21fae6de2cd439ca818f48fdb59edd054edc8a5b7764f60b2892\": container with ID starting with a765c0b1796f21fae6de2cd439ca818f48fdb59edd054edc8a5b7764f60b2892 not found: ID does not exist" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.379366 4838 scope.go:117] "RemoveContainer" containerID="5e2f8a2b9bed7d2a647baef6fbda1c9af5a39cc97f3c54633cbb705e14ec5454" Nov 28 10:19:18 crc kubenswrapper[4838]: E1128 10:19:18.379652 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e2f8a2b9bed7d2a647baef6fbda1c9af5a39cc97f3c54633cbb705e14ec5454\": container with ID starting with 5e2f8a2b9bed7d2a647baef6fbda1c9af5a39cc97f3c54633cbb705e14ec5454 not found: ID does not exist" containerID="5e2f8a2b9bed7d2a647baef6fbda1c9af5a39cc97f3c54633cbb705e14ec5454" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.379692 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e2f8a2b9bed7d2a647baef6fbda1c9af5a39cc97f3c54633cbb705e14ec5454"} 
err="failed to get container status \"5e2f8a2b9bed7d2a647baef6fbda1c9af5a39cc97f3c54633cbb705e14ec5454\": rpc error: code = NotFound desc = could not find container \"5e2f8a2b9bed7d2a647baef6fbda1c9af5a39cc97f3c54633cbb705e14ec5454\": container with ID starting with 5e2f8a2b9bed7d2a647baef6fbda1c9af5a39cc97f3c54633cbb705e14ec5454 not found: ID does not exist" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.491214 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-config-data\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.491338 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.491381 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-97wqq\" (UniqueName: \"kubernetes.io/projected/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-kube-api-access-97wqq\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.491403 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-scripts\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.491563 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.491644 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-run-httpd\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.491688 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-log-httpd\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.491770 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.574474 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c566861-fff7-471f-b998-638650e497c7" 
path="/var/lib/kubelet/pods/6c566861-fff7-471f-b998-638650e497c7/volumes" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.593459 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.593700 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-config-data\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.593825 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.593880 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-97wqq\" (UniqueName: \"kubernetes.io/projected/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-kube-api-access-97wqq\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.593921 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-scripts\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.593997 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.594052 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-run-httpd\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.594087 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-log-httpd\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.594858 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-log-httpd\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.595044 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-run-httpd\") pod \"ceilometer-0\" (UID: 
\"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.599627 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.600079 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.603255 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-config-data\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.604901 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-scripts\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.605220 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.617312 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-97wqq\" (UniqueName: \"kubernetes.io/projected/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-kube-api-access-97wqq\") pod \"ceilometer-0\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") " pod="openstack/ceilometer-0" Nov 28 10:19:18 crc kubenswrapper[4838]: I1128 10:19:18.643147 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 10:19:19 crc kubenswrapper[4838]: I1128 10:19:19.109331 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 10:19:19 crc kubenswrapper[4838]: W1128 10:19:19.131810 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2e0f281_41b3_4f17_a174_41ae4ef2c53e.slice/crio-2a4b6677704a43ccabd6b2c733c318551ddd51e78452ce9485f099d3e9073a0f WatchSource:0}: Error finding container 2a4b6677704a43ccabd6b2c733c318551ddd51e78452ce9485f099d3e9073a0f: Status 404 returned error can't find the container with id 2a4b6677704a43ccabd6b2c733c318551ddd51e78452ce9485f099d3e9073a0f Nov 28 10:19:19 crc kubenswrapper[4838]: I1128 10:19:19.225211 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c2e0f281-41b3-4f17-a174-41ae4ef2c53e","Type":"ContainerStarted","Data":"2a4b6677704a43ccabd6b2c733c318551ddd51e78452ce9485f099d3e9073a0f"} Nov 28 10:19:19 crc kubenswrapper[4838]: I1128 10:19:19.946819 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.129083 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08804858-2bd5-4d72-9bf3-a9989128bed2-logs\") pod \"08804858-2bd5-4d72-9bf3-a9989128bed2\" (UID: \"08804858-2bd5-4d72-9bf3-a9989128bed2\") " Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.129225 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gpv9p\" (UniqueName: \"kubernetes.io/projected/08804858-2bd5-4d72-9bf3-a9989128bed2-kube-api-access-gpv9p\") pod \"08804858-2bd5-4d72-9bf3-a9989128bed2\" (UID: \"08804858-2bd5-4d72-9bf3-a9989128bed2\") " Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.129277 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08804858-2bd5-4d72-9bf3-a9989128bed2-combined-ca-bundle\") pod \"08804858-2bd5-4d72-9bf3-a9989128bed2\" (UID: \"08804858-2bd5-4d72-9bf3-a9989128bed2\") " Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.129360 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08804858-2bd5-4d72-9bf3-a9989128bed2-config-data\") pod \"08804858-2bd5-4d72-9bf3-a9989128bed2\" (UID: \"08804858-2bd5-4d72-9bf3-a9989128bed2\") " Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.130286 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08804858-2bd5-4d72-9bf3-a9989128bed2-logs" (OuterVolumeSpecName: "logs") pod "08804858-2bd5-4d72-9bf3-a9989128bed2" (UID: "08804858-2bd5-4d72-9bf3-a9989128bed2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.140701 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08804858-2bd5-4d72-9bf3-a9989128bed2-kube-api-access-gpv9p" (OuterVolumeSpecName: "kube-api-access-gpv9p") pod "08804858-2bd5-4d72-9bf3-a9989128bed2" (UID: "08804858-2bd5-4d72-9bf3-a9989128bed2"). InnerVolumeSpecName "kube-api-access-gpv9p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.162134 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08804858-2bd5-4d72-9bf3-a9989128bed2-config-data" (OuterVolumeSpecName: "config-data") pod "08804858-2bd5-4d72-9bf3-a9989128bed2" (UID: "08804858-2bd5-4d72-9bf3-a9989128bed2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.167026 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08804858-2bd5-4d72-9bf3-a9989128bed2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "08804858-2bd5-4d72-9bf3-a9989128bed2" (UID: "08804858-2bd5-4d72-9bf3-a9989128bed2"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.232046 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gpv9p\" (UniqueName: \"kubernetes.io/projected/08804858-2bd5-4d72-9bf3-a9989128bed2-kube-api-access-gpv9p\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.232120 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08804858-2bd5-4d72-9bf3-a9989128bed2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.232132 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08804858-2bd5-4d72-9bf3-a9989128bed2-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.232143 4838 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08804858-2bd5-4d72-9bf3-a9989128bed2-logs\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.236130 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c2e0f281-41b3-4f17-a174-41ae4ef2c53e","Type":"ContainerStarted","Data":"77edfdbed06620222b72edf06d1eb7713e03f7de01681ff4a3829524297f906e"} Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.238328 4838 generic.go:334] "Generic (PLEG): container finished" podID="08804858-2bd5-4d72-9bf3-a9989128bed2" containerID="c67ec7edbd8b25e8b0719e1488d6594bc0c6521159c17cf07b269110c74a7d07" exitCode=0 Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.238352 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"08804858-2bd5-4d72-9bf3-a9989128bed2","Type":"ContainerDied","Data":"c67ec7edbd8b25e8b0719e1488d6594bc0c6521159c17cf07b269110c74a7d07"} Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.238370 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"08804858-2bd5-4d72-9bf3-a9989128bed2","Type":"ContainerDied","Data":"271bbe562545d4b3313dbbe00a89f00e3128c383ede0f3d232b02ea798e26b71"} Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.238387 4838 scope.go:117] "RemoveContainer" containerID="c67ec7edbd8b25e8b0719e1488d6594bc0c6521159c17cf07b269110c74a7d07" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.238440 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.261172 4838 scope.go:117] "RemoveContainer" containerID="7fc2eda0c1d5eb5cc2e81e20acea01be60ea76f80d72047dc977eb460c1c3500" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.283167 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.298140 4838 scope.go:117] "RemoveContainer" containerID="c67ec7edbd8b25e8b0719e1488d6594bc0c6521159c17cf07b269110c74a7d07" Nov 28 10:19:20 crc kubenswrapper[4838]: E1128 10:19:20.298754 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c67ec7edbd8b25e8b0719e1488d6594bc0c6521159c17cf07b269110c74a7d07\": container with ID starting with c67ec7edbd8b25e8b0719e1488d6594bc0c6521159c17cf07b269110c74a7d07 not found: ID does not exist" containerID="c67ec7edbd8b25e8b0719e1488d6594bc0c6521159c17cf07b269110c74a7d07" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.298800 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c67ec7edbd8b25e8b0719e1488d6594bc0c6521159c17cf07b269110c74a7d07"} err="failed to get container status \"c67ec7edbd8b25e8b0719e1488d6594bc0c6521159c17cf07b269110c74a7d07\": rpc error: code = NotFound desc = could not find container \"c67ec7edbd8b25e8b0719e1488d6594bc0c6521159c17cf07b269110c74a7d07\": container with ID starting with c67ec7edbd8b25e8b0719e1488d6594bc0c6521159c17cf07b269110c74a7d07 not found: ID does not exist" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.298834 4838 scope.go:117] "RemoveContainer" containerID="7fc2eda0c1d5eb5cc2e81e20acea01be60ea76f80d72047dc977eb460c1c3500" Nov 28 10:19:20 crc kubenswrapper[4838]: E1128 10:19:20.299296 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7fc2eda0c1d5eb5cc2e81e20acea01be60ea76f80d72047dc977eb460c1c3500\": container with ID starting with 7fc2eda0c1d5eb5cc2e81e20acea01be60ea76f80d72047dc977eb460c1c3500 not found: ID does not exist" containerID="7fc2eda0c1d5eb5cc2e81e20acea01be60ea76f80d72047dc977eb460c1c3500" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.299348 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7fc2eda0c1d5eb5cc2e81e20acea01be60ea76f80d72047dc977eb460c1c3500"} err="failed to get container status \"7fc2eda0c1d5eb5cc2e81e20acea01be60ea76f80d72047dc977eb460c1c3500\": rpc error: code = NotFound desc = could not find container \"7fc2eda0c1d5eb5cc2e81e20acea01be60ea76f80d72047dc977eb460c1c3500\": container with ID starting with 7fc2eda0c1d5eb5cc2e81e20acea01be60ea76f80d72047dc977eb460c1c3500 not found: ID does not exist" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.304902 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.313570 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 10:19:20 crc kubenswrapper[4838]: E1128 10:19:20.314003 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08804858-2bd5-4d72-9bf3-a9989128bed2" containerName="nova-api-api" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.314027 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="08804858-2bd5-4d72-9bf3-a9989128bed2" containerName="nova-api-api" Nov 28 10:19:20 crc 
kubenswrapper[4838]: E1128 10:19:20.314050 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08804858-2bd5-4d72-9bf3-a9989128bed2" containerName="nova-api-log" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.314059 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="08804858-2bd5-4d72-9bf3-a9989128bed2" containerName="nova-api-log" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.314306 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="08804858-2bd5-4d72-9bf3-a9989128bed2" containerName="nova-api-api" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.314339 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="08804858-2bd5-4d72-9bf3-a9989128bed2" containerName="nova-api-log" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.315487 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.322552 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.322822 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.323161 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.328692 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.440473 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwhtd\" (UniqueName: \"kubernetes.io/projected/880569c4-9eff-4c85-b5c1-5fbe25142cf2-kube-api-access-zwhtd\") pod \"nova-api-0\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " pod="openstack/nova-api-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.440536 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-public-tls-certs\") pod \"nova-api-0\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " pod="openstack/nova-api-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.440563 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " pod="openstack/nova-api-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.440594 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-internal-tls-certs\") pod \"nova-api-0\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " pod="openstack/nova-api-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.440627 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/880569c4-9eff-4c85-b5c1-5fbe25142cf2-logs\") pod \"nova-api-0\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " pod="openstack/nova-api-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.440667 4838 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-config-data\") pod \"nova-api-0\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " pod="openstack/nova-api-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.543178 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwhtd\" (UniqueName: \"kubernetes.io/projected/880569c4-9eff-4c85-b5c1-5fbe25142cf2-kube-api-access-zwhtd\") pod \"nova-api-0\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " pod="openstack/nova-api-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.543307 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-public-tls-certs\") pod \"nova-api-0\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " pod="openstack/nova-api-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.543370 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " pod="openstack/nova-api-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.543429 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-internal-tls-certs\") pod \"nova-api-0\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " pod="openstack/nova-api-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.543647 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/880569c4-9eff-4c85-b5c1-5fbe25142cf2-logs\") pod \"nova-api-0\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " pod="openstack/nova-api-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.543903 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-config-data\") pod \"nova-api-0\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " pod="openstack/nova-api-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.545707 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/880569c4-9eff-4c85-b5c1-5fbe25142cf2-logs\") pod \"nova-api-0\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " pod="openstack/nova-api-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.550150 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-config-data\") pod \"nova-api-0\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " pod="openstack/nova-api-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.552079 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " pod="openstack/nova-api-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.553173 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-public-tls-certs\") pod \"nova-api-0\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " pod="openstack/nova-api-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.553831 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-internal-tls-certs\") pod \"nova-api-0\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " pod="openstack/nova-api-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.569281 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwhtd\" (UniqueName: \"kubernetes.io/projected/880569c4-9eff-4c85-b5c1-5fbe25142cf2-kube-api-access-zwhtd\") pod \"nova-api-0\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " pod="openstack/nova-api-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.579478 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08804858-2bd5-4d72-9bf3-a9989128bed2" path="/var/lib/kubelet/pods/08804858-2bd5-4d72-9bf3-a9989128bed2/volumes" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.648152 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.792635 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:19:20 crc kubenswrapper[4838]: I1128 10:19:20.817839 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:19:21 crc kubenswrapper[4838]: I1128 10:19:21.250831 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c2e0f281-41b3-4f17-a174-41ae4ef2c53e","Type":"ContainerStarted","Data":"d3ee886cd3bec35334f9ae02a3b24c96867dd9914979da62aaa9f71103066c0b"} Nov 28 10:19:21 crc kubenswrapper[4838]: I1128 10:19:21.295847 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 10:19:21 crc kubenswrapper[4838]: W1128 10:19:21.299936 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod880569c4_9eff_4c85_b5c1_5fbe25142cf2.slice/crio-c03aee80da35a716ea02b2229d3e4363f4fdb6fac4b1ec9779b504e01bed7d9f WatchSource:0}: Error finding container c03aee80da35a716ea02b2229d3e4363f4fdb6fac4b1ec9779b504e01bed7d9f: Status 404 returned error can't find the container with id c03aee80da35a716ea02b2229d3e4363f4fdb6fac4b1ec9779b504e01bed7d9f Nov 28 10:19:21 crc kubenswrapper[4838]: I1128 10:19:21.302295 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 28 10:19:21 crc kubenswrapper[4838]: I1128 10:19:21.535146 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-cv5n2"] Nov 28 10:19:21 crc kubenswrapper[4838]: I1128 10:19:21.536248 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-cv5n2" Nov 28 10:19:21 crc kubenswrapper[4838]: I1128 10:19:21.539678 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 28 10:19:21 crc kubenswrapper[4838]: I1128 10:19:21.541477 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 28 10:19:21 crc kubenswrapper[4838]: I1128 10:19:21.542963 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-cv5n2"] Nov 28 10:19:21 crc kubenswrapper[4838]: I1128 10:19:21.564504 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thbnp\" (UniqueName: \"kubernetes.io/projected/768b2764-c429-421c-8def-15e4caee8eb8-kube-api-access-thbnp\") pod \"nova-cell1-cell-mapping-cv5n2\" (UID: \"768b2764-c429-421c-8def-15e4caee8eb8\") " pod="openstack/nova-cell1-cell-mapping-cv5n2" Nov 28 10:19:21 crc kubenswrapper[4838]: I1128 10:19:21.564552 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/768b2764-c429-421c-8def-15e4caee8eb8-scripts\") pod \"nova-cell1-cell-mapping-cv5n2\" (UID: \"768b2764-c429-421c-8def-15e4caee8eb8\") " pod="openstack/nova-cell1-cell-mapping-cv5n2" Nov 28 10:19:21 crc kubenswrapper[4838]: I1128 10:19:21.564918 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/768b2764-c429-421c-8def-15e4caee8eb8-config-data\") pod \"nova-cell1-cell-mapping-cv5n2\" (UID: \"768b2764-c429-421c-8def-15e4caee8eb8\") " pod="openstack/nova-cell1-cell-mapping-cv5n2" Nov 28 10:19:21 crc kubenswrapper[4838]: I1128 10:19:21.564981 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/768b2764-c429-421c-8def-15e4caee8eb8-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-cv5n2\" (UID: \"768b2764-c429-421c-8def-15e4caee8eb8\") " pod="openstack/nova-cell1-cell-mapping-cv5n2" Nov 28 10:19:21 crc kubenswrapper[4838]: I1128 10:19:21.665523 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thbnp\" (UniqueName: \"kubernetes.io/projected/768b2764-c429-421c-8def-15e4caee8eb8-kube-api-access-thbnp\") pod \"nova-cell1-cell-mapping-cv5n2\" (UID: \"768b2764-c429-421c-8def-15e4caee8eb8\") " pod="openstack/nova-cell1-cell-mapping-cv5n2" Nov 28 10:19:21 crc kubenswrapper[4838]: I1128 10:19:21.665565 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/768b2764-c429-421c-8def-15e4caee8eb8-scripts\") pod \"nova-cell1-cell-mapping-cv5n2\" (UID: \"768b2764-c429-421c-8def-15e4caee8eb8\") " pod="openstack/nova-cell1-cell-mapping-cv5n2" Nov 28 10:19:21 crc kubenswrapper[4838]: I1128 10:19:21.665665 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/768b2764-c429-421c-8def-15e4caee8eb8-config-data\") pod \"nova-cell1-cell-mapping-cv5n2\" (UID: \"768b2764-c429-421c-8def-15e4caee8eb8\") " pod="openstack/nova-cell1-cell-mapping-cv5n2" Nov 28 10:19:21 crc kubenswrapper[4838]: I1128 10:19:21.665682 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/768b2764-c429-421c-8def-15e4caee8eb8-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-cv5n2\" (UID: \"768b2764-c429-421c-8def-15e4caee8eb8\") " pod="openstack/nova-cell1-cell-mapping-cv5n2" Nov 28 10:19:21 crc kubenswrapper[4838]: I1128 10:19:21.673446 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/768b2764-c429-421c-8def-15e4caee8eb8-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-cv5n2\" (UID: \"768b2764-c429-421c-8def-15e4caee8eb8\") " pod="openstack/nova-cell1-cell-mapping-cv5n2" Nov 28 10:19:21 crc kubenswrapper[4838]: I1128 10:19:21.682780 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/768b2764-c429-421c-8def-15e4caee8eb8-config-data\") pod \"nova-cell1-cell-mapping-cv5n2\" (UID: \"768b2764-c429-421c-8def-15e4caee8eb8\") " pod="openstack/nova-cell1-cell-mapping-cv5n2" Nov 28 10:19:21 crc kubenswrapper[4838]: I1128 10:19:21.686125 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/768b2764-c429-421c-8def-15e4caee8eb8-scripts\") pod \"nova-cell1-cell-mapping-cv5n2\" (UID: \"768b2764-c429-421c-8def-15e4caee8eb8\") " pod="openstack/nova-cell1-cell-mapping-cv5n2" Nov 28 10:19:21 crc kubenswrapper[4838]: I1128 10:19:21.691895 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thbnp\" (UniqueName: \"kubernetes.io/projected/768b2764-c429-421c-8def-15e4caee8eb8-kube-api-access-thbnp\") pod \"nova-cell1-cell-mapping-cv5n2\" (UID: \"768b2764-c429-421c-8def-15e4caee8eb8\") " pod="openstack/nova-cell1-cell-mapping-cv5n2" Nov 28 10:19:21 crc kubenswrapper[4838]: I1128 10:19:21.761584 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-cv5n2" Nov 28 10:19:22 crc kubenswrapper[4838]: I1128 10:19:22.225914 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-cv5n2"] Nov 28 10:19:22 crc kubenswrapper[4838]: W1128 10:19:22.232246 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod768b2764_c429_421c_8def_15e4caee8eb8.slice/crio-25248f70ee55ee4f90ccd5fe2ddd59139c0f6d796b107d14d7b8f20944168438 WatchSource:0}: Error finding container 25248f70ee55ee4f90ccd5fe2ddd59139c0f6d796b107d14d7b8f20944168438: Status 404 returned error can't find the container with id 25248f70ee55ee4f90ccd5fe2ddd59139c0f6d796b107d14d7b8f20944168438 Nov 28 10:19:22 crc kubenswrapper[4838]: I1128 10:19:22.261736 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-cv5n2" event={"ID":"768b2764-c429-421c-8def-15e4caee8eb8","Type":"ContainerStarted","Data":"25248f70ee55ee4f90ccd5fe2ddd59139c0f6d796b107d14d7b8f20944168438"} Nov 28 10:19:22 crc kubenswrapper[4838]: I1128 10:19:22.264995 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c2e0f281-41b3-4f17-a174-41ae4ef2c53e","Type":"ContainerStarted","Data":"544a52c665677008f1adc4145d8513585f2965df639fdc1b0e28b4b61bcdad5f"} Nov 28 10:19:22 crc kubenswrapper[4838]: I1128 10:19:22.267259 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"880569c4-9eff-4c85-b5c1-5fbe25142cf2","Type":"ContainerStarted","Data":"350674883c059262e029c742231f8144d71d04adb20049e32b0655ca06464c0d"} Nov 28 10:19:22 crc kubenswrapper[4838]: I1128 10:19:22.267297 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"880569c4-9eff-4c85-b5c1-5fbe25142cf2","Type":"ContainerStarted","Data":"35561e0e918da48928b6ae232b7f71f291ada55d90d49374555fadd1a6631f68"} Nov 28 10:19:22 crc kubenswrapper[4838]: I1128 10:19:22.267309 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"880569c4-9eff-4c85-b5c1-5fbe25142cf2","Type":"ContainerStarted","Data":"c03aee80da35a716ea02b2229d3e4363f4fdb6fac4b1ec9779b504e01bed7d9f"} Nov 28 10:19:22 crc kubenswrapper[4838]: I1128 10:19:22.299249 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.2992257240000002 podStartE2EDuration="2.299225724s" podCreationTimestamp="2025-11-28 10:19:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:19:22.297913518 +0000 UTC m=+1333.996887728" watchObservedRunningTime="2025-11-28 10:19:22.299225724 +0000 UTC m=+1333.998199934" Nov 28 10:19:23 crc kubenswrapper[4838]: I1128 10:19:23.281606 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-cv5n2" event={"ID":"768b2764-c429-421c-8def-15e4caee8eb8","Type":"ContainerStarted","Data":"998d9df52c8721870fc8421468bc009017bdfaa5ae767988063ae7fdca6086a7"} Nov 28 10:19:23 crc kubenswrapper[4838]: I1128 10:19:23.303284 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-cv5n2" podStartSLOduration=2.303232934 podStartE2EDuration="2.303232934s" podCreationTimestamp="2025-11-28 10:19:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 
+0000 UTC" observedRunningTime="2025-11-28 10:19:23.303043388 +0000 UTC m=+1335.002017578" watchObservedRunningTime="2025-11-28 10:19:23.303232934 +0000 UTC m=+1335.002207104" Nov 28 10:19:23 crc kubenswrapper[4838]: I1128 10:19:23.684952 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" Nov 28 10:19:23 crc kubenswrapper[4838]: I1128 10:19:23.762162 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-d47fb"] Nov 28 10:19:23 crc kubenswrapper[4838]: I1128 10:19:23.762761 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-566b5b7845-d47fb" podUID="6b7969ed-0cb5-4696-95e0-3cd08d3fef1a" containerName="dnsmasq-dns" containerID="cri-o://59326f45508a6ad9a3c8b44fafadd82350df901c91732d8795919b376058b40a" gracePeriod=10 Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.241773 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-566b5b7845-d47fb" Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.292264 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c2e0f281-41b3-4f17-a174-41ae4ef2c53e","Type":"ContainerStarted","Data":"714aabd613ec3e8cc00d9bc3feaa2c0a645d14266b0871be587d939c62ef8fc6"} Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.292521 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.295061 4838 generic.go:334] "Generic (PLEG): container finished" podID="6b7969ed-0cb5-4696-95e0-3cd08d3fef1a" containerID="59326f45508a6ad9a3c8b44fafadd82350df901c91732d8795919b376058b40a" exitCode=0 Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.295109 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-566b5b7845-d47fb" event={"ID":"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a","Type":"ContainerDied","Data":"59326f45508a6ad9a3c8b44fafadd82350df901c91732d8795919b376058b40a"} Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.295149 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-566b5b7845-d47fb" event={"ID":"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a","Type":"ContainerDied","Data":"a27cbbb0ce6ef4635d6cdb3a76ab27c2ed631e1ed5db8655b58859d665f96756"} Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.295171 4838 scope.go:117] "RemoveContainer" containerID="59326f45508a6ad9a3c8b44fafadd82350df901c91732d8795919b376058b40a" Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.295116 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-566b5b7845-d47fb" Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.321306 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.174420737 podStartE2EDuration="6.321288252s" podCreationTimestamp="2025-11-28 10:19:18 +0000 UTC" firstStartedPulling="2025-11-28 10:19:19.134264936 +0000 UTC m=+1330.833239106" lastFinishedPulling="2025-11-28 10:19:23.281132461 +0000 UTC m=+1334.980106621" observedRunningTime="2025-11-28 10:19:24.316938732 +0000 UTC m=+1336.015912902" watchObservedRunningTime="2025-11-28 10:19:24.321288252 +0000 UTC m=+1336.020262412" Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.336881 4838 scope.go:117] "RemoveContainer" containerID="d1d95500c3c42b167a4676e3bb8cd2b37319fc347148b90bd7805f680c08efec" Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.363706 4838 scope.go:117] "RemoveContainer" containerID="59326f45508a6ad9a3c8b44fafadd82350df901c91732d8795919b376058b40a" Nov 28 10:19:24 crc kubenswrapper[4838]: E1128 10:19:24.382262 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59326f45508a6ad9a3c8b44fafadd82350df901c91732d8795919b376058b40a\": container with ID starting with 59326f45508a6ad9a3c8b44fafadd82350df901c91732d8795919b376058b40a not found: ID does not exist" containerID="59326f45508a6ad9a3c8b44fafadd82350df901c91732d8795919b376058b40a" Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.382303 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59326f45508a6ad9a3c8b44fafadd82350df901c91732d8795919b376058b40a"} err="failed to get container status \"59326f45508a6ad9a3c8b44fafadd82350df901c91732d8795919b376058b40a\": rpc error: code = NotFound desc = could not find container \"59326f45508a6ad9a3c8b44fafadd82350df901c91732d8795919b376058b40a\": container with ID starting with 59326f45508a6ad9a3c8b44fafadd82350df901c91732d8795919b376058b40a not found: ID does not exist" Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.382327 4838 scope.go:117] "RemoveContainer" containerID="d1d95500c3c42b167a4676e3bb8cd2b37319fc347148b90bd7805f680c08efec" Nov 28 10:19:24 crc kubenswrapper[4838]: E1128 10:19:24.383802 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d1d95500c3c42b167a4676e3bb8cd2b37319fc347148b90bd7805f680c08efec\": container with ID starting with d1d95500c3c42b167a4676e3bb8cd2b37319fc347148b90bd7805f680c08efec not found: ID does not exist" containerID="d1d95500c3c42b167a4676e3bb8cd2b37319fc347148b90bd7805f680c08efec" Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.383837 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1d95500c3c42b167a4676e3bb8cd2b37319fc347148b90bd7805f680c08efec"} err="failed to get container status \"d1d95500c3c42b167a4676e3bb8cd2b37319fc347148b90bd7805f680c08efec\": rpc error: code = NotFound desc = could not find container \"d1d95500c3c42b167a4676e3bb8cd2b37319fc347148b90bd7805f680c08efec\": container with ID starting with d1d95500c3c42b167a4676e3bb8cd2b37319fc347148b90bd7805f680c08efec not found: ID does not exist" Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.417533 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-ovsdbserver-nb\") pod \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\" (UID: \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\") " Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.417694 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g9hx9\" (UniqueName: \"kubernetes.io/projected/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-kube-api-access-g9hx9\") pod \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\" (UID: \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\") " Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.417825 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-dns-svc\") pod \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\" (UID: \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\") " Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.417909 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-ovsdbserver-sb\") pod \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\" (UID: \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\") " Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.417933 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-config\") pod \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\" (UID: \"6b7969ed-0cb5-4696-95e0-3cd08d3fef1a\") " Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.423111 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-kube-api-access-g9hx9" (OuterVolumeSpecName: "kube-api-access-g9hx9") pod "6b7969ed-0cb5-4696-95e0-3cd08d3fef1a" (UID: "6b7969ed-0cb5-4696-95e0-3cd08d3fef1a"). InnerVolumeSpecName "kube-api-access-g9hx9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.480393 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-config" (OuterVolumeSpecName: "config") pod "6b7969ed-0cb5-4696-95e0-3cd08d3fef1a" (UID: "6b7969ed-0cb5-4696-95e0-3cd08d3fef1a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.484161 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6b7969ed-0cb5-4696-95e0-3cd08d3fef1a" (UID: "6b7969ed-0cb5-4696-95e0-3cd08d3fef1a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.486523 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6b7969ed-0cb5-4696-95e0-3cd08d3fef1a" (UID: "6b7969ed-0cb5-4696-95e0-3cd08d3fef1a"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.491708 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6b7969ed-0cb5-4696-95e0-3cd08d3fef1a" (UID: "6b7969ed-0cb5-4696-95e0-3cd08d3fef1a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.519597 4838 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.520416 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.520518 4838 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.520593 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g9hx9\" (UniqueName: \"kubernetes.io/projected/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-kube-api-access-g9hx9\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.520674 4838 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.620951 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-d47fb"] Nov 28 10:19:24 crc kubenswrapper[4838]: I1128 10:19:24.632530 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-566b5b7845-d47fb"] Nov 28 10:19:26 crc kubenswrapper[4838]: I1128 10:19:26.581778 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b7969ed-0cb5-4696-95e0-3cd08d3fef1a" path="/var/lib/kubelet/pods/6b7969ed-0cb5-4696-95e0-3cd08d3fef1a/volumes" Nov 28 10:19:27 crc kubenswrapper[4838]: I1128 10:19:27.356165 4838 generic.go:334] "Generic (PLEG): container finished" podID="768b2764-c429-421c-8def-15e4caee8eb8" containerID="998d9df52c8721870fc8421468bc009017bdfaa5ae767988063ae7fdca6086a7" exitCode=0 Nov 28 10:19:27 crc kubenswrapper[4838]: I1128 10:19:27.356275 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-cv5n2" event={"ID":"768b2764-c429-421c-8def-15e4caee8eb8","Type":"ContainerDied","Data":"998d9df52c8721870fc8421468bc009017bdfaa5ae767988063ae7fdca6086a7"} Nov 28 10:19:28 crc kubenswrapper[4838]: I1128 10:19:28.819161 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-cv5n2" Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.010336 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-thbnp\" (UniqueName: \"kubernetes.io/projected/768b2764-c429-421c-8def-15e4caee8eb8-kube-api-access-thbnp\") pod \"768b2764-c429-421c-8def-15e4caee8eb8\" (UID: \"768b2764-c429-421c-8def-15e4caee8eb8\") " Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.010738 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/768b2764-c429-421c-8def-15e4caee8eb8-combined-ca-bundle\") pod \"768b2764-c429-421c-8def-15e4caee8eb8\" (UID: \"768b2764-c429-421c-8def-15e4caee8eb8\") " Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.010984 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/768b2764-c429-421c-8def-15e4caee8eb8-config-data\") pod \"768b2764-c429-421c-8def-15e4caee8eb8\" (UID: \"768b2764-c429-421c-8def-15e4caee8eb8\") " Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.011116 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/768b2764-c429-421c-8def-15e4caee8eb8-scripts\") pod \"768b2764-c429-421c-8def-15e4caee8eb8\" (UID: \"768b2764-c429-421c-8def-15e4caee8eb8\") " Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.018465 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/768b2764-c429-421c-8def-15e4caee8eb8-scripts" (OuterVolumeSpecName: "scripts") pod "768b2764-c429-421c-8def-15e4caee8eb8" (UID: "768b2764-c429-421c-8def-15e4caee8eb8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.019305 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/768b2764-c429-421c-8def-15e4caee8eb8-kube-api-access-thbnp" (OuterVolumeSpecName: "kube-api-access-thbnp") pod "768b2764-c429-421c-8def-15e4caee8eb8" (UID: "768b2764-c429-421c-8def-15e4caee8eb8"). InnerVolumeSpecName "kube-api-access-thbnp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.049007 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/768b2764-c429-421c-8def-15e4caee8eb8-config-data" (OuterVolumeSpecName: "config-data") pod "768b2764-c429-421c-8def-15e4caee8eb8" (UID: "768b2764-c429-421c-8def-15e4caee8eb8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.067103 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/768b2764-c429-421c-8def-15e4caee8eb8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "768b2764-c429-421c-8def-15e4caee8eb8" (UID: "768b2764-c429-421c-8def-15e4caee8eb8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.113964 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-thbnp\" (UniqueName: \"kubernetes.io/projected/768b2764-c429-421c-8def-15e4caee8eb8-kube-api-access-thbnp\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.114024 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/768b2764-c429-421c-8def-15e4caee8eb8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.114046 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/768b2764-c429-421c-8def-15e4caee8eb8-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.114064 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/768b2764-c429-421c-8def-15e4caee8eb8-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.381861 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-cv5n2" event={"ID":"768b2764-c429-421c-8def-15e4caee8eb8","Type":"ContainerDied","Data":"25248f70ee55ee4f90ccd5fe2ddd59139c0f6d796b107d14d7b8f20944168438"} Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.381913 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="25248f70ee55ee4f90ccd5fe2ddd59139c0f6d796b107d14d7b8f20944168438" Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.381948 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-cv5n2" Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.581458 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.581851 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="6d174409-ecf7-48fb-81bd-869ac3596342" containerName="nova-scheduler-scheduler" containerID="cri-o://13687a1678d10d30ff799a7895572da78b797230489a8ab4682e7c5b7db10266" gracePeriod=30 Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.600058 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.600678 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="880569c4-9eff-4c85-b5c1-5fbe25142cf2" containerName="nova-api-log" containerID="cri-o://35561e0e918da48928b6ae232b7f71f291ada55d90d49374555fadd1a6631f68" gracePeriod=30 Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.601055 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="880569c4-9eff-4c85-b5c1-5fbe25142cf2" containerName="nova-api-api" containerID="cri-o://350674883c059262e029c742231f8144d71d04adb20049e32b0655ca06464c0d" gracePeriod=30 Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.617307 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.617693 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="a7e0e2c6-e63f-40c8-8db2-e6c738371e1a" 
containerName="nova-metadata-log" containerID="cri-o://931e5c14cb8a43f4a31e0931a3d13a23e05d2b737e6ce291e48d165f15dea03f" gracePeriod=30 Nov 28 10:19:29 crc kubenswrapper[4838]: I1128 10:19:29.617926 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="a7e0e2c6-e63f-40c8-8db2-e6c738371e1a" containerName="nova-metadata-metadata" containerID="cri-o://ac9417a62df7e5d09d56e9f2081e1f4926e36360fbd4f73a8b599f3615ebe4ad" gracePeriod=30 Nov 28 10:19:29 crc kubenswrapper[4838]: E1128 10:19:29.777707 4838 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod880569c4_9eff_4c85_b5c1_5fbe25142cf2.slice/crio-conmon-35561e0e918da48928b6ae232b7f71f291ada55d90d49374555fadd1a6631f68.scope\": RecentStats: unable to find data in memory cache]" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.220951 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.359588 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zwhtd\" (UniqueName: \"kubernetes.io/projected/880569c4-9eff-4c85-b5c1-5fbe25142cf2-kube-api-access-zwhtd\") pod \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.359665 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-public-tls-certs\") pod \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.359742 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-internal-tls-certs\") pod \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.359777 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-combined-ca-bundle\") pod \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.359799 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/880569c4-9eff-4c85-b5c1-5fbe25142cf2-logs\") pod \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.359838 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-config-data\") pod \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\" (UID: \"880569c4-9eff-4c85-b5c1-5fbe25142cf2\") " Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.361316 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/880569c4-9eff-4c85-b5c1-5fbe25142cf2-logs" (OuterVolumeSpecName: "logs") pod "880569c4-9eff-4c85-b5c1-5fbe25142cf2" (UID: "880569c4-9eff-4c85-b5c1-5fbe25142cf2"). 
InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.365458 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/880569c4-9eff-4c85-b5c1-5fbe25142cf2-kube-api-access-zwhtd" (OuterVolumeSpecName: "kube-api-access-zwhtd") pod "880569c4-9eff-4c85-b5c1-5fbe25142cf2" (UID: "880569c4-9eff-4c85-b5c1-5fbe25142cf2"). InnerVolumeSpecName "kube-api-access-zwhtd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.392381 4838 generic.go:334] "Generic (PLEG): container finished" podID="880569c4-9eff-4c85-b5c1-5fbe25142cf2" containerID="350674883c059262e029c742231f8144d71d04adb20049e32b0655ca06464c0d" exitCode=0 Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.392427 4838 generic.go:334] "Generic (PLEG): container finished" podID="880569c4-9eff-4c85-b5c1-5fbe25142cf2" containerID="35561e0e918da48928b6ae232b7f71f291ada55d90d49374555fadd1a6631f68" exitCode=143 Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.392493 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.392909 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"880569c4-9eff-4c85-b5c1-5fbe25142cf2","Type":"ContainerDied","Data":"350674883c059262e029c742231f8144d71d04adb20049e32b0655ca06464c0d"} Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.392977 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"880569c4-9eff-4c85-b5c1-5fbe25142cf2","Type":"ContainerDied","Data":"35561e0e918da48928b6ae232b7f71f291ada55d90d49374555fadd1a6631f68"} Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.392995 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"880569c4-9eff-4c85-b5c1-5fbe25142cf2","Type":"ContainerDied","Data":"c03aee80da35a716ea02b2229d3e4363f4fdb6fac4b1ec9779b504e01bed7d9f"} Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.393014 4838 scope.go:117] "RemoveContainer" containerID="350674883c059262e029c742231f8144d71d04adb20049e32b0655ca06464c0d" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.396827 4838 generic.go:334] "Generic (PLEG): container finished" podID="a7e0e2c6-e63f-40c8-8db2-e6c738371e1a" containerID="931e5c14cb8a43f4a31e0931a3d13a23e05d2b737e6ce291e48d165f15dea03f" exitCode=143 Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.396880 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a","Type":"ContainerDied","Data":"931e5c14cb8a43f4a31e0931a3d13a23e05d2b737e6ce291e48d165f15dea03f"} Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.401314 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-config-data" (OuterVolumeSpecName: "config-data") pod "880569c4-9eff-4c85-b5c1-5fbe25142cf2" (UID: "880569c4-9eff-4c85-b5c1-5fbe25142cf2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.405086 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "880569c4-9eff-4c85-b5c1-5fbe25142cf2" (UID: "880569c4-9eff-4c85-b5c1-5fbe25142cf2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.421186 4838 scope.go:117] "RemoveContainer" containerID="35561e0e918da48928b6ae232b7f71f291ada55d90d49374555fadd1a6631f68" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.425579 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "880569c4-9eff-4c85-b5c1-5fbe25142cf2" (UID: "880569c4-9eff-4c85-b5c1-5fbe25142cf2"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.438168 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "880569c4-9eff-4c85-b5c1-5fbe25142cf2" (UID: "880569c4-9eff-4c85-b5c1-5fbe25142cf2"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.453841 4838 scope.go:117] "RemoveContainer" containerID="350674883c059262e029c742231f8144d71d04adb20049e32b0655ca06464c0d" Nov 28 10:19:30 crc kubenswrapper[4838]: E1128 10:19:30.454256 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"350674883c059262e029c742231f8144d71d04adb20049e32b0655ca06464c0d\": container with ID starting with 350674883c059262e029c742231f8144d71d04adb20049e32b0655ca06464c0d not found: ID does not exist" containerID="350674883c059262e029c742231f8144d71d04adb20049e32b0655ca06464c0d" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.454295 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"350674883c059262e029c742231f8144d71d04adb20049e32b0655ca06464c0d"} err="failed to get container status \"350674883c059262e029c742231f8144d71d04adb20049e32b0655ca06464c0d\": rpc error: code = NotFound desc = could not find container \"350674883c059262e029c742231f8144d71d04adb20049e32b0655ca06464c0d\": container with ID starting with 350674883c059262e029c742231f8144d71d04adb20049e32b0655ca06464c0d not found: ID does not exist" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.454318 4838 scope.go:117] "RemoveContainer" containerID="35561e0e918da48928b6ae232b7f71f291ada55d90d49374555fadd1a6631f68" Nov 28 10:19:30 crc kubenswrapper[4838]: E1128 10:19:30.454618 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"35561e0e918da48928b6ae232b7f71f291ada55d90d49374555fadd1a6631f68\": container with ID starting with 35561e0e918da48928b6ae232b7f71f291ada55d90d49374555fadd1a6631f68 not found: ID does not exist" containerID="35561e0e918da48928b6ae232b7f71f291ada55d90d49374555fadd1a6631f68" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.454656 4838 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35561e0e918da48928b6ae232b7f71f291ada55d90d49374555fadd1a6631f68"} err="failed to get container status \"35561e0e918da48928b6ae232b7f71f291ada55d90d49374555fadd1a6631f68\": rpc error: code = NotFound desc = could not find container \"35561e0e918da48928b6ae232b7f71f291ada55d90d49374555fadd1a6631f68\": container with ID starting with 35561e0e918da48928b6ae232b7f71f291ada55d90d49374555fadd1a6631f68 not found: ID does not exist" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.454698 4838 scope.go:117] "RemoveContainer" containerID="350674883c059262e029c742231f8144d71d04adb20049e32b0655ca06464c0d" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.455023 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"350674883c059262e029c742231f8144d71d04adb20049e32b0655ca06464c0d"} err="failed to get container status \"350674883c059262e029c742231f8144d71d04adb20049e32b0655ca06464c0d\": rpc error: code = NotFound desc = could not find container \"350674883c059262e029c742231f8144d71d04adb20049e32b0655ca06464c0d\": container with ID starting with 350674883c059262e029c742231f8144d71d04adb20049e32b0655ca06464c0d not found: ID does not exist" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.455050 4838 scope.go:117] "RemoveContainer" containerID="35561e0e918da48928b6ae232b7f71f291ada55d90d49374555fadd1a6631f68" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.455279 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35561e0e918da48928b6ae232b7f71f291ada55d90d49374555fadd1a6631f68"} err="failed to get container status \"35561e0e918da48928b6ae232b7f71f291ada55d90d49374555fadd1a6631f68\": rpc error: code = NotFound desc = could not find container \"35561e0e918da48928b6ae232b7f71f291ada55d90d49374555fadd1a6631f68\": container with ID starting with 35561e0e918da48928b6ae232b7f71f291ada55d90d49374555fadd1a6631f68 not found: ID does not exist" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.462755 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.462783 4838 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/880569c4-9eff-4c85-b5c1-5fbe25142cf2-logs\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.462796 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.462809 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zwhtd\" (UniqueName: \"kubernetes.io/projected/880569c4-9eff-4c85-b5c1-5fbe25142cf2-kube-api-access-zwhtd\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.462819 4838 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.462831 4838 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/880569c4-9eff-4c85-b5c1-5fbe25142cf2-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.715455 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.722980 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.740881 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 10:19:30 crc kubenswrapper[4838]: E1128 10:19:30.741265 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b7969ed-0cb5-4696-95e0-3cd08d3fef1a" containerName="dnsmasq-dns" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.741302 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b7969ed-0cb5-4696-95e0-3cd08d3fef1a" containerName="dnsmasq-dns" Nov 28 10:19:30 crc kubenswrapper[4838]: E1128 10:19:30.741324 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="880569c4-9eff-4c85-b5c1-5fbe25142cf2" containerName="nova-api-log" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.741331 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="880569c4-9eff-4c85-b5c1-5fbe25142cf2" containerName="nova-api-log" Nov 28 10:19:30 crc kubenswrapper[4838]: E1128 10:19:30.741342 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="880569c4-9eff-4c85-b5c1-5fbe25142cf2" containerName="nova-api-api" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.741347 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="880569c4-9eff-4c85-b5c1-5fbe25142cf2" containerName="nova-api-api" Nov 28 10:19:30 crc kubenswrapper[4838]: E1128 10:19:30.741359 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b7969ed-0cb5-4696-95e0-3cd08d3fef1a" containerName="init" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.741365 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b7969ed-0cb5-4696-95e0-3cd08d3fef1a" containerName="init" Nov 28 10:19:30 crc kubenswrapper[4838]: E1128 10:19:30.741374 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="768b2764-c429-421c-8def-15e4caee8eb8" containerName="nova-manage" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.741380 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="768b2764-c429-421c-8def-15e4caee8eb8" containerName="nova-manage" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.741552 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b7969ed-0cb5-4696-95e0-3cd08d3fef1a" containerName="dnsmasq-dns" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.741564 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="768b2764-c429-421c-8def-15e4caee8eb8" containerName="nova-manage" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.741575 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="880569c4-9eff-4c85-b5c1-5fbe25142cf2" containerName="nova-api-log" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.741588 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="880569c4-9eff-4c85-b5c1-5fbe25142cf2" containerName="nova-api-api" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.742704 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.749101 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.749953 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.750775 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.767555 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3305f5c4-7a09-439f-bf0f-534b3dea0b05-public-tls-certs\") pod \"nova-api-0\" (UID: \"3305f5c4-7a09-439f-bf0f-534b3dea0b05\") " pod="openstack/nova-api-0" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.767616 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3305f5c4-7a09-439f-bf0f-534b3dea0b05-internal-tls-certs\") pod \"nova-api-0\" (UID: \"3305f5c4-7a09-439f-bf0f-534b3dea0b05\") " pod="openstack/nova-api-0" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.767687 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3305f5c4-7a09-439f-bf0f-534b3dea0b05-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3305f5c4-7a09-439f-bf0f-534b3dea0b05\") " pod="openstack/nova-api-0" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.767735 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l92b9\" (UniqueName: \"kubernetes.io/projected/3305f5c4-7a09-439f-bf0f-534b3dea0b05-kube-api-access-l92b9\") pod \"nova-api-0\" (UID: \"3305f5c4-7a09-439f-bf0f-534b3dea0b05\") " pod="openstack/nova-api-0" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.767768 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3305f5c4-7a09-439f-bf0f-534b3dea0b05-config-data\") pod \"nova-api-0\" (UID: \"3305f5c4-7a09-439f-bf0f-534b3dea0b05\") " pod="openstack/nova-api-0" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.767835 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3305f5c4-7a09-439f-bf0f-534b3dea0b05-logs\") pod \"nova-api-0\" (UID: \"3305f5c4-7a09-439f-bf0f-534b3dea0b05\") " pod="openstack/nova-api-0" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.781198 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.868412 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3305f5c4-7a09-439f-bf0f-534b3dea0b05-public-tls-certs\") pod \"nova-api-0\" (UID: \"3305f5c4-7a09-439f-bf0f-534b3dea0b05\") " pod="openstack/nova-api-0" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.868472 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3305f5c4-7a09-439f-bf0f-534b3dea0b05-internal-tls-certs\") pod 
\"nova-api-0\" (UID: \"3305f5c4-7a09-439f-bf0f-534b3dea0b05\") " pod="openstack/nova-api-0" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.868537 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3305f5c4-7a09-439f-bf0f-534b3dea0b05-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3305f5c4-7a09-439f-bf0f-534b3dea0b05\") " pod="openstack/nova-api-0" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.868574 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l92b9\" (UniqueName: \"kubernetes.io/projected/3305f5c4-7a09-439f-bf0f-534b3dea0b05-kube-api-access-l92b9\") pod \"nova-api-0\" (UID: \"3305f5c4-7a09-439f-bf0f-534b3dea0b05\") " pod="openstack/nova-api-0" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.868606 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3305f5c4-7a09-439f-bf0f-534b3dea0b05-config-data\") pod \"nova-api-0\" (UID: \"3305f5c4-7a09-439f-bf0f-534b3dea0b05\") " pod="openstack/nova-api-0" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.868673 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3305f5c4-7a09-439f-bf0f-534b3dea0b05-logs\") pod \"nova-api-0\" (UID: \"3305f5c4-7a09-439f-bf0f-534b3dea0b05\") " pod="openstack/nova-api-0" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.869148 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3305f5c4-7a09-439f-bf0f-534b3dea0b05-logs\") pod \"nova-api-0\" (UID: \"3305f5c4-7a09-439f-bf0f-534b3dea0b05\") " pod="openstack/nova-api-0" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.873677 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3305f5c4-7a09-439f-bf0f-534b3dea0b05-internal-tls-certs\") pod \"nova-api-0\" (UID: \"3305f5c4-7a09-439f-bf0f-534b3dea0b05\") " pod="openstack/nova-api-0" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.874645 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3305f5c4-7a09-439f-bf0f-534b3dea0b05-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3305f5c4-7a09-439f-bf0f-534b3dea0b05\") " pod="openstack/nova-api-0" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.876610 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3305f5c4-7a09-439f-bf0f-534b3dea0b05-config-data\") pod \"nova-api-0\" (UID: \"3305f5c4-7a09-439f-bf0f-534b3dea0b05\") " pod="openstack/nova-api-0" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.878182 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3305f5c4-7a09-439f-bf0f-534b3dea0b05-public-tls-certs\") pod \"nova-api-0\" (UID: \"3305f5c4-7a09-439f-bf0f-534b3dea0b05\") " pod="openstack/nova-api-0" Nov 28 10:19:30 crc kubenswrapper[4838]: I1128 10:19:30.890826 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l92b9\" (UniqueName: \"kubernetes.io/projected/3305f5c4-7a09-439f-bf0f-534b3dea0b05-kube-api-access-l92b9\") pod \"nova-api-0\" (UID: \"3305f5c4-7a09-439f-bf0f-534b3dea0b05\") " 
pod="openstack/nova-api-0" Nov 28 10:19:31 crc kubenswrapper[4838]: I1128 10:19:31.060455 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 10:19:31 crc kubenswrapper[4838]: I1128 10:19:31.421973 4838 generic.go:334] "Generic (PLEG): container finished" podID="6d174409-ecf7-48fb-81bd-869ac3596342" containerID="13687a1678d10d30ff799a7895572da78b797230489a8ab4682e7c5b7db10266" exitCode=0 Nov 28 10:19:31 crc kubenswrapper[4838]: I1128 10:19:31.422192 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6d174409-ecf7-48fb-81bd-869ac3596342","Type":"ContainerDied","Data":"13687a1678d10d30ff799a7895572da78b797230489a8ab4682e7c5b7db10266"} Nov 28 10:19:31 crc kubenswrapper[4838]: I1128 10:19:31.546219 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 10:19:31 crc kubenswrapper[4838]: W1128 10:19:31.550155 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3305f5c4_7a09_439f_bf0f_534b3dea0b05.slice/crio-38c92b60a6108dc7f1a52692a6931564575c9d8674441de9382d70be310ed9cc WatchSource:0}: Error finding container 38c92b60a6108dc7f1a52692a6931564575c9d8674441de9382d70be310ed9cc: Status 404 returned error can't find the container with id 38c92b60a6108dc7f1a52692a6931564575c9d8674441de9382d70be310ed9cc Nov 28 10:19:31 crc kubenswrapper[4838]: I1128 10:19:31.556330 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 10:19:31 crc kubenswrapper[4838]: I1128 10:19:31.691962 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d174409-ecf7-48fb-81bd-869ac3596342-config-data\") pod \"6d174409-ecf7-48fb-81bd-869ac3596342\" (UID: \"6d174409-ecf7-48fb-81bd-869ac3596342\") " Nov 28 10:19:31 crc kubenswrapper[4838]: I1128 10:19:31.692178 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fbljl\" (UniqueName: \"kubernetes.io/projected/6d174409-ecf7-48fb-81bd-869ac3596342-kube-api-access-fbljl\") pod \"6d174409-ecf7-48fb-81bd-869ac3596342\" (UID: \"6d174409-ecf7-48fb-81bd-869ac3596342\") " Nov 28 10:19:31 crc kubenswrapper[4838]: I1128 10:19:31.692279 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d174409-ecf7-48fb-81bd-869ac3596342-combined-ca-bundle\") pod \"6d174409-ecf7-48fb-81bd-869ac3596342\" (UID: \"6d174409-ecf7-48fb-81bd-869ac3596342\") " Nov 28 10:19:31 crc kubenswrapper[4838]: I1128 10:19:31.700218 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d174409-ecf7-48fb-81bd-869ac3596342-kube-api-access-fbljl" (OuterVolumeSpecName: "kube-api-access-fbljl") pod "6d174409-ecf7-48fb-81bd-869ac3596342" (UID: "6d174409-ecf7-48fb-81bd-869ac3596342"). InnerVolumeSpecName "kube-api-access-fbljl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:19:31 crc kubenswrapper[4838]: I1128 10:19:31.734896 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d174409-ecf7-48fb-81bd-869ac3596342-config-data" (OuterVolumeSpecName: "config-data") pod "6d174409-ecf7-48fb-81bd-869ac3596342" (UID: "6d174409-ecf7-48fb-81bd-869ac3596342"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:19:31 crc kubenswrapper[4838]: I1128 10:19:31.746904 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d174409-ecf7-48fb-81bd-869ac3596342-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6d174409-ecf7-48fb-81bd-869ac3596342" (UID: "6d174409-ecf7-48fb-81bd-869ac3596342"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:19:31 crc kubenswrapper[4838]: I1128 10:19:31.809535 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d174409-ecf7-48fb-81bd-869ac3596342-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:31 crc kubenswrapper[4838]: I1128 10:19:31.809582 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fbljl\" (UniqueName: \"kubernetes.io/projected/6d174409-ecf7-48fb-81bd-869ac3596342-kube-api-access-fbljl\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:31 crc kubenswrapper[4838]: I1128 10:19:31.809601 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d174409-ecf7-48fb-81bd-869ac3596342-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.433194 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6d174409-ecf7-48fb-81bd-869ac3596342","Type":"ContainerDied","Data":"bcaeaf22a59309084b2482048582dc65a06453aacde5cc792269733cb18b7c27"} Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.433246 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.433265 4838 scope.go:117] "RemoveContainer" containerID="13687a1678d10d30ff799a7895572da78b797230489a8ab4682e7c5b7db10266" Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.437553 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3305f5c4-7a09-439f-bf0f-534b3dea0b05","Type":"ContainerStarted","Data":"c714ad3de81ddb3d26485efa5c7fddee1fa29b3612d7f3782a49c41715edd52c"} Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.437841 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3305f5c4-7a09-439f-bf0f-534b3dea0b05","Type":"ContainerStarted","Data":"f102789fd28bd946c7cc721296d441fca08bf29ee0cb6b4f8e7573a9f7f82497"} Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.437851 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3305f5c4-7a09-439f-bf0f-534b3dea0b05","Type":"ContainerStarted","Data":"38c92b60a6108dc7f1a52692a6931564575c9d8674441de9382d70be310ed9cc"} Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.458637 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.458620476 podStartE2EDuration="2.458620476s" podCreationTimestamp="2025-11-28 10:19:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:19:32.457793353 +0000 UTC m=+1344.156767543" watchObservedRunningTime="2025-11-28 10:19:32.458620476 +0000 UTC m=+1344.157594666" Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.489784 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/nova-scheduler-0"] Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.528780 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.560780 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 10:19:32 crc kubenswrapper[4838]: E1128 10:19:32.561245 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d174409-ecf7-48fb-81bd-869ac3596342" containerName="nova-scheduler-scheduler" Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.561269 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d174409-ecf7-48fb-81bd-869ac3596342" containerName="nova-scheduler-scheduler" Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.561482 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d174409-ecf7-48fb-81bd-869ac3596342" containerName="nova-scheduler-scheduler" Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.565866 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.568781 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.586373 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d174409-ecf7-48fb-81bd-869ac3596342" path="/var/lib/kubelet/pods/6d174409-ecf7-48fb-81bd-869ac3596342/volumes" Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.587356 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="880569c4-9eff-4c85-b5c1-5fbe25142cf2" path="/var/lib/kubelet/pods/880569c4-9eff-4c85-b5c1-5fbe25142cf2/volumes" Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.588909 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.626604 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a05bed3-8b30-4959-9ee2-4b25a928b0e5-config-data\") pod \"nova-scheduler-0\" (UID: \"3a05bed3-8b30-4959-9ee2-4b25a928b0e5\") " pod="openstack/nova-scheduler-0" Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.626680 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a05bed3-8b30-4959-9ee2-4b25a928b0e5-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3a05bed3-8b30-4959-9ee2-4b25a928b0e5\") " pod="openstack/nova-scheduler-0" Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.626922 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6nbn\" (UniqueName: \"kubernetes.io/projected/3a05bed3-8b30-4959-9ee2-4b25a928b0e5-kube-api-access-w6nbn\") pod \"nova-scheduler-0\" (UID: \"3a05bed3-8b30-4959-9ee2-4b25a928b0e5\") " pod="openstack/nova-scheduler-0" Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.728303 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a05bed3-8b30-4959-9ee2-4b25a928b0e5-config-data\") pod \"nova-scheduler-0\" (UID: \"3a05bed3-8b30-4959-9ee2-4b25a928b0e5\") " pod="openstack/nova-scheduler-0" Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.728374 4838 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a05bed3-8b30-4959-9ee2-4b25a928b0e5-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3a05bed3-8b30-4959-9ee2-4b25a928b0e5\") " pod="openstack/nova-scheduler-0" Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.728453 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6nbn\" (UniqueName: \"kubernetes.io/projected/3a05bed3-8b30-4959-9ee2-4b25a928b0e5-kube-api-access-w6nbn\") pod \"nova-scheduler-0\" (UID: \"3a05bed3-8b30-4959-9ee2-4b25a928b0e5\") " pod="openstack/nova-scheduler-0" Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.733522 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a05bed3-8b30-4959-9ee2-4b25a928b0e5-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3a05bed3-8b30-4959-9ee2-4b25a928b0e5\") " pod="openstack/nova-scheduler-0" Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.748968 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a05bed3-8b30-4959-9ee2-4b25a928b0e5-config-data\") pod \"nova-scheduler-0\" (UID: \"3a05bed3-8b30-4959-9ee2-4b25a928b0e5\") " pod="openstack/nova-scheduler-0" Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.750526 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="a7e0e2c6-e63f-40c8-8db2-e6c738371e1a" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.176:8775/\": read tcp 10.217.0.2:37406->10.217.0.176:8775: read: connection reset by peer" Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.750554 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="a7e0e2c6-e63f-40c8-8db2-e6c738371e1a" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.176:8775/\": read tcp 10.217.0.2:37420->10.217.0.176:8775: read: connection reset by peer" Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.755081 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6nbn\" (UniqueName: \"kubernetes.io/projected/3a05bed3-8b30-4959-9ee2-4b25a928b0e5-kube-api-access-w6nbn\") pod \"nova-scheduler-0\" (UID: \"3a05bed3-8b30-4959-9ee2-4b25a928b0e5\") " pod="openstack/nova-scheduler-0" Nov 28 10:19:32 crc kubenswrapper[4838]: I1128 10:19:32.900947 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.230767 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.342585 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-combined-ca-bundle\") pod \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\" (UID: \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\") " Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.342649 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-logs\") pod \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\" (UID: \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\") " Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.342820 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-config-data\") pod \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\" (UID: \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\") " Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.342939 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-nova-metadata-tls-certs\") pod \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\" (UID: \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\") " Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.343008 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-82mgz\" (UniqueName: \"kubernetes.io/projected/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-kube-api-access-82mgz\") pod \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\" (UID: \"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a\") " Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.344707 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-logs" (OuterVolumeSpecName: "logs") pod "a7e0e2c6-e63f-40c8-8db2-e6c738371e1a" (UID: "a7e0e2c6-e63f-40c8-8db2-e6c738371e1a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.348404 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-kube-api-access-82mgz" (OuterVolumeSpecName: "kube-api-access-82mgz") pod "a7e0e2c6-e63f-40c8-8db2-e6c738371e1a" (UID: "a7e0e2c6-e63f-40c8-8db2-e6c738371e1a"). InnerVolumeSpecName "kube-api-access-82mgz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.368901 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-config-data" (OuterVolumeSpecName: "config-data") pod "a7e0e2c6-e63f-40c8-8db2-e6c738371e1a" (UID: "a7e0e2c6-e63f-40c8-8db2-e6c738371e1a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.380007 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a7e0e2c6-e63f-40c8-8db2-e6c738371e1a" (UID: "a7e0e2c6-e63f-40c8-8db2-e6c738371e1a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.399586 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.404681 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "a7e0e2c6-e63f-40c8-8db2-e6c738371e1a" (UID: "a7e0e2c6-e63f-40c8-8db2-e6c738371e1a"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:19:33 crc kubenswrapper[4838]: W1128 10:19:33.411151 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3a05bed3_8b30_4959_9ee2_4b25a928b0e5.slice/crio-10dc7359723d02f42e12e6dca87ac6c431ffcb5eb4087cfbd51f1728b5d9fb94 WatchSource:0}: Error finding container 10dc7359723d02f42e12e6dca87ac6c431ffcb5eb4087cfbd51f1728b5d9fb94: Status 404 returned error can't find the container with id 10dc7359723d02f42e12e6dca87ac6c431ffcb5eb4087cfbd51f1728b5d9fb94 Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.445955 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.446005 4838 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.446015 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-82mgz\" (UniqueName: \"kubernetes.io/projected/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-kube-api-access-82mgz\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.446023 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.446079 4838 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a-logs\") on node \"crc\" DevicePath \"\"" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.446171 4838 generic.go:334] "Generic (PLEG): container finished" podID="a7e0e2c6-e63f-40c8-8db2-e6c738371e1a" containerID="ac9417a62df7e5d09d56e9f2081e1f4926e36360fbd4f73a8b599f3615ebe4ad" exitCode=0 Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.446210 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.446250 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a","Type":"ContainerDied","Data":"ac9417a62df7e5d09d56e9f2081e1f4926e36360fbd4f73a8b599f3615ebe4ad"} Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.446280 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a7e0e2c6-e63f-40c8-8db2-e6c738371e1a","Type":"ContainerDied","Data":"b6fb30e743b20441b7bd4d2616ce86f82f8de764089d51a9f4121ec70c5c1324"} Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.446296 4838 scope.go:117] "RemoveContainer" containerID="ac9417a62df7e5d09d56e9f2081e1f4926e36360fbd4f73a8b599f3615ebe4ad" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.458880 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3a05bed3-8b30-4959-9ee2-4b25a928b0e5","Type":"ContainerStarted","Data":"10dc7359723d02f42e12e6dca87ac6c431ffcb5eb4087cfbd51f1728b5d9fb94"} Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.481065 4838 scope.go:117] "RemoveContainer" containerID="931e5c14cb8a43f4a31e0931a3d13a23e05d2b737e6ce291e48d165f15dea03f" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.504162 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.506469 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.522524 4838 scope.go:117] "RemoveContainer" containerID="ac9417a62df7e5d09d56e9f2081e1f4926e36360fbd4f73a8b599f3615ebe4ad" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.526008 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 10:19:33 crc kubenswrapper[4838]: E1128 10:19:33.526573 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7e0e2c6-e63f-40c8-8db2-e6c738371e1a" containerName="nova-metadata-metadata" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.526606 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7e0e2c6-e63f-40c8-8db2-e6c738371e1a" containerName="nova-metadata-metadata" Nov 28 10:19:33 crc kubenswrapper[4838]: E1128 10:19:33.526654 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7e0e2c6-e63f-40c8-8db2-e6c738371e1a" containerName="nova-metadata-log" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.526678 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7e0e2c6-e63f-40c8-8db2-e6c738371e1a" containerName="nova-metadata-log" Nov 28 10:19:33 crc kubenswrapper[4838]: E1128 10:19:33.526642 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac9417a62df7e5d09d56e9f2081e1f4926e36360fbd4f73a8b599f3615ebe4ad\": container with ID starting with ac9417a62df7e5d09d56e9f2081e1f4926e36360fbd4f73a8b599f3615ebe4ad not found: ID does not exist" containerID="ac9417a62df7e5d09d56e9f2081e1f4926e36360fbd4f73a8b599f3615ebe4ad" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.526817 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac9417a62df7e5d09d56e9f2081e1f4926e36360fbd4f73a8b599f3615ebe4ad"} err="failed to get container status 
\"ac9417a62df7e5d09d56e9f2081e1f4926e36360fbd4f73a8b599f3615ebe4ad\": rpc error: code = NotFound desc = could not find container \"ac9417a62df7e5d09d56e9f2081e1f4926e36360fbd4f73a8b599f3615ebe4ad\": container with ID starting with ac9417a62df7e5d09d56e9f2081e1f4926e36360fbd4f73a8b599f3615ebe4ad not found: ID does not exist" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.526883 4838 scope.go:117] "RemoveContainer" containerID="931e5c14cb8a43f4a31e0931a3d13a23e05d2b737e6ce291e48d165f15dea03f" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.526976 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7e0e2c6-e63f-40c8-8db2-e6c738371e1a" containerName="nova-metadata-metadata" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.527013 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7e0e2c6-e63f-40c8-8db2-e6c738371e1a" containerName="nova-metadata-log" Nov 28 10:19:33 crc kubenswrapper[4838]: E1128 10:19:33.527264 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"931e5c14cb8a43f4a31e0931a3d13a23e05d2b737e6ce291e48d165f15dea03f\": container with ID starting with 931e5c14cb8a43f4a31e0931a3d13a23e05d2b737e6ce291e48d165f15dea03f not found: ID does not exist" containerID="931e5c14cb8a43f4a31e0931a3d13a23e05d2b737e6ce291e48d165f15dea03f" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.527303 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"931e5c14cb8a43f4a31e0931a3d13a23e05d2b737e6ce291e48d165f15dea03f"} err="failed to get container status \"931e5c14cb8a43f4a31e0931a3d13a23e05d2b737e6ce291e48d165f15dea03f\": rpc error: code = NotFound desc = could not find container \"931e5c14cb8a43f4a31e0931a3d13a23e05d2b737e6ce291e48d165f15dea03f\": container with ID starting with 931e5c14cb8a43f4a31e0931a3d13a23e05d2b737e6ce291e48d165f15dea03f not found: ID does not exist" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.528426 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.530657 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.530697 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.536524 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.653504 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a524de62-c36c-4abf-9a45-57247679c4e7-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"a524de62-c36c-4abf-9a45-57247679c4e7\") " pod="openstack/nova-metadata-0" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.653577 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a524de62-c36c-4abf-9a45-57247679c4e7-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a524de62-c36c-4abf-9a45-57247679c4e7\") " pod="openstack/nova-metadata-0" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.653630 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a524de62-c36c-4abf-9a45-57247679c4e7-config-data\") pod \"nova-metadata-0\" (UID: \"a524de62-c36c-4abf-9a45-57247679c4e7\") " pod="openstack/nova-metadata-0" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.653927 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a524de62-c36c-4abf-9a45-57247679c4e7-logs\") pod \"nova-metadata-0\" (UID: \"a524de62-c36c-4abf-9a45-57247679c4e7\") " pod="openstack/nova-metadata-0" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.653995 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqgf6\" (UniqueName: \"kubernetes.io/projected/a524de62-c36c-4abf-9a45-57247679c4e7-kube-api-access-bqgf6\") pod \"nova-metadata-0\" (UID: \"a524de62-c36c-4abf-9a45-57247679c4e7\") " pod="openstack/nova-metadata-0" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.755945 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a524de62-c36c-4abf-9a45-57247679c4e7-logs\") pod \"nova-metadata-0\" (UID: \"a524de62-c36c-4abf-9a45-57247679c4e7\") " pod="openstack/nova-metadata-0" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.756136 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqgf6\" (UniqueName: \"kubernetes.io/projected/a524de62-c36c-4abf-9a45-57247679c4e7-kube-api-access-bqgf6\") pod \"nova-metadata-0\" (UID: \"a524de62-c36c-4abf-9a45-57247679c4e7\") " pod="openstack/nova-metadata-0" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.756208 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a524de62-c36c-4abf-9a45-57247679c4e7-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"a524de62-c36c-4abf-9a45-57247679c4e7\") " 
pod="openstack/nova-metadata-0" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.756290 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a524de62-c36c-4abf-9a45-57247679c4e7-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a524de62-c36c-4abf-9a45-57247679c4e7\") " pod="openstack/nova-metadata-0" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.756391 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a524de62-c36c-4abf-9a45-57247679c4e7-config-data\") pod \"nova-metadata-0\" (UID: \"a524de62-c36c-4abf-9a45-57247679c4e7\") " pod="openstack/nova-metadata-0" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.756677 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a524de62-c36c-4abf-9a45-57247679c4e7-logs\") pod \"nova-metadata-0\" (UID: \"a524de62-c36c-4abf-9a45-57247679c4e7\") " pod="openstack/nova-metadata-0" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.763459 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a524de62-c36c-4abf-9a45-57247679c4e7-config-data\") pod \"nova-metadata-0\" (UID: \"a524de62-c36c-4abf-9a45-57247679c4e7\") " pod="openstack/nova-metadata-0" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.764059 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a524de62-c36c-4abf-9a45-57247679c4e7-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a524de62-c36c-4abf-9a45-57247679c4e7\") " pod="openstack/nova-metadata-0" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.766756 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a524de62-c36c-4abf-9a45-57247679c4e7-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"a524de62-c36c-4abf-9a45-57247679c4e7\") " pod="openstack/nova-metadata-0" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.773803 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqgf6\" (UniqueName: \"kubernetes.io/projected/a524de62-c36c-4abf-9a45-57247679c4e7-kube-api-access-bqgf6\") pod \"nova-metadata-0\" (UID: \"a524de62-c36c-4abf-9a45-57247679c4e7\") " pod="openstack/nova-metadata-0" Nov 28 10:19:33 crc kubenswrapper[4838]: I1128 10:19:33.862828 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 10:19:34 crc kubenswrapper[4838]: I1128 10:19:34.390017 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 10:19:34 crc kubenswrapper[4838]: W1128 10:19:34.390999 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda524de62_c36c_4abf_9a45_57247679c4e7.slice/crio-dfd01a890b85f9e6ee707cb6c6766098520ea8e6fc50ab41425eb091c294f5f8 WatchSource:0}: Error finding container dfd01a890b85f9e6ee707cb6c6766098520ea8e6fc50ab41425eb091c294f5f8: Status 404 returned error can't find the container with id dfd01a890b85f9e6ee707cb6c6766098520ea8e6fc50ab41425eb091c294f5f8 Nov 28 10:19:34 crc kubenswrapper[4838]: I1128 10:19:34.479934 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a524de62-c36c-4abf-9a45-57247679c4e7","Type":"ContainerStarted","Data":"dfd01a890b85f9e6ee707cb6c6766098520ea8e6fc50ab41425eb091c294f5f8"} Nov 28 10:19:34 crc kubenswrapper[4838]: I1128 10:19:34.483648 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3a05bed3-8b30-4959-9ee2-4b25a928b0e5","Type":"ContainerStarted","Data":"a8265dfa1c47d94077228785ae8a07eb6aa65039add3855fffbdf97d83468012"} Nov 28 10:19:34 crc kubenswrapper[4838]: I1128 10:19:34.513901 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.513882325 podStartE2EDuration="2.513882325s" podCreationTimestamp="2025-11-28 10:19:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:19:34.506933202 +0000 UTC m=+1346.205907402" watchObservedRunningTime="2025-11-28 10:19:34.513882325 +0000 UTC m=+1346.212856495" Nov 28 10:19:34 crc kubenswrapper[4838]: I1128 10:19:34.597782 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7e0e2c6-e63f-40c8-8db2-e6c738371e1a" path="/var/lib/kubelet/pods/a7e0e2c6-e63f-40c8-8db2-e6c738371e1a/volumes" Nov 28 10:19:35 crc kubenswrapper[4838]: I1128 10:19:35.499628 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a524de62-c36c-4abf-9a45-57247679c4e7","Type":"ContainerStarted","Data":"cf6981c0e3e026e986859bb0ea436fdddb248aa82a63032ed3ffc3b4e55872a4"} Nov 28 10:19:35 crc kubenswrapper[4838]: I1128 10:19:35.500106 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a524de62-c36c-4abf-9a45-57247679c4e7","Type":"ContainerStarted","Data":"92d28f2d9f0d39ec960388f82a5756ef981c9bd6cea3c01fbf3ab7e102a4f0e1"} Nov 28 10:19:37 crc kubenswrapper[4838]: I1128 10:19:37.902048 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 10:19:38 crc kubenswrapper[4838]: I1128 10:19:38.863514 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 10:19:38 crc kubenswrapper[4838]: I1128 10:19:38.864045 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 10:19:41 crc kubenswrapper[4838]: I1128 10:19:41.061437 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 10:19:41 crc kubenswrapper[4838]: I1128 10:19:41.062955 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openstack/nova-api-0" Nov 28 10:19:42 crc kubenswrapper[4838]: I1128 10:19:42.079966 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3305f5c4-7a09-439f-bf0f-534b3dea0b05" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.187:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 10:19:42 crc kubenswrapper[4838]: I1128 10:19:42.080044 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3305f5c4-7a09-439f-bf0f-534b3dea0b05" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.187:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 10:19:42 crc kubenswrapper[4838]: I1128 10:19:42.901459 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 10:19:42 crc kubenswrapper[4838]: I1128 10:19:42.933445 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 10:19:42 crc kubenswrapper[4838]: I1128 10:19:42.957226 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=9.957203053 podStartE2EDuration="9.957203053s" podCreationTimestamp="2025-11-28 10:19:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:19:35.5289299 +0000 UTC m=+1347.227904110" watchObservedRunningTime="2025-11-28 10:19:42.957203053 +0000 UTC m=+1354.656177263" Nov 28 10:19:43 crc kubenswrapper[4838]: I1128 10:19:43.864025 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 10:19:43 crc kubenswrapper[4838]: I1128 10:19:43.864428 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 10:19:43 crc kubenswrapper[4838]: I1128 10:19:43.917067 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 10:19:44 crc kubenswrapper[4838]: I1128 10:19:44.880049 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="a524de62-c36c-4abf-9a45-57247679c4e7" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.189:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 10:19:44 crc kubenswrapper[4838]: I1128 10:19:44.880060 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="a524de62-c36c-4abf-9a45-57247679c4e7" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.189:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 10:19:48 crc kubenswrapper[4838]: I1128 10:19:48.652047 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 10:19:51 crc kubenswrapper[4838]: I1128 10:19:51.067155 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 10:19:51 crc kubenswrapper[4838]: I1128 10:19:51.067760 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 10:19:51 crc kubenswrapper[4838]: I1128 10:19:51.069715 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openstack/nova-api-0" Nov 28 10:19:51 crc kubenswrapper[4838]: I1128 10:19:51.073442 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 10:19:51 crc kubenswrapper[4838]: I1128 10:19:51.973451 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 10:19:51 crc kubenswrapper[4838]: I1128 10:19:51.981421 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 10:19:53 crc kubenswrapper[4838]: I1128 10:19:53.867639 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 10:19:53 crc kubenswrapper[4838]: I1128 10:19:53.869868 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 10:19:53 crc kubenswrapper[4838]: I1128 10:19:53.874239 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 10:19:54 crc kubenswrapper[4838]: I1128 10:19:54.006707 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 10:20:02 crc kubenswrapper[4838]: I1128 10:20:02.090792 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 10:20:03 crc kubenswrapper[4838]: I1128 10:20:03.535831 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 10:20:06 crc kubenswrapper[4838]: I1128 10:20:06.291202 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="366c721a-0e79-44a0-aa02-761c4ddc6936" containerName="rabbitmq" containerID="cri-o://4e5b05230586f5bd5d31052d153bdc3687b15e03fd9d5613b97b7fe519435dba" gracePeriod=604796 Nov 28 10:20:07 crc kubenswrapper[4838]: I1128 10:20:07.679333 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="dbe39b78-c198-480e-9bca-17eaed6183bf" containerName="rabbitmq" containerID="cri-o://14999f81bcbe0f31824a6a12d3130d4e3af12b8cc9f8f6f76e3d05268e066714" gracePeriod=604796 Nov 28 10:20:07 crc kubenswrapper[4838]: I1128 10:20:07.679910 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="dbe39b78-c198-480e-9bca-17eaed6183bf" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.98:5671: connect: connection refused" Nov 28 10:20:08 crc kubenswrapper[4838]: I1128 10:20:08.015660 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="366c721a-0e79-44a0-aa02-761c4ddc6936" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.99:5671: connect: connection refused" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.191120 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.193276 4838 generic.go:334] "Generic (PLEG): container finished" podID="366c721a-0e79-44a0-aa02-761c4ddc6936" containerID="4e5b05230586f5bd5d31052d153bdc3687b15e03fd9d5613b97b7fe519435dba" exitCode=0 Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.193323 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"366c721a-0e79-44a0-aa02-761c4ddc6936","Type":"ContainerDied","Data":"4e5b05230586f5bd5d31052d153bdc3687b15e03fd9d5613b97b7fe519435dba"} Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.193349 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"366c721a-0e79-44a0-aa02-761c4ddc6936","Type":"ContainerDied","Data":"249fc1438860fe7409049c7d24ce60590ae19b8902705d578708b84bba9c9aba"} Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.193366 4838 scope.go:117] "RemoveContainer" containerID="4e5b05230586f5bd5d31052d153bdc3687b15e03fd9d5613b97b7fe519435dba" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.215775 4838 scope.go:117] "RemoveContainer" containerID="c72266b9b0f0228a2212b582e2d53a73166e50af00c9674d71d6a8b1fbb59a1d" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.271812 4838 scope.go:117] "RemoveContainer" containerID="4e5b05230586f5bd5d31052d153bdc3687b15e03fd9d5613b97b7fe519435dba" Nov 28 10:20:13 crc kubenswrapper[4838]: E1128 10:20:13.275990 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e5b05230586f5bd5d31052d153bdc3687b15e03fd9d5613b97b7fe519435dba\": container with ID starting with 4e5b05230586f5bd5d31052d153bdc3687b15e03fd9d5613b97b7fe519435dba not found: ID does not exist" containerID="4e5b05230586f5bd5d31052d153bdc3687b15e03fd9d5613b97b7fe519435dba" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.276043 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e5b05230586f5bd5d31052d153bdc3687b15e03fd9d5613b97b7fe519435dba"} err="failed to get container status \"4e5b05230586f5bd5d31052d153bdc3687b15e03fd9d5613b97b7fe519435dba\": rpc error: code = NotFound desc = could not find container \"4e5b05230586f5bd5d31052d153bdc3687b15e03fd9d5613b97b7fe519435dba\": container with ID starting with 4e5b05230586f5bd5d31052d153bdc3687b15e03fd9d5613b97b7fe519435dba not found: ID does not exist" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.276066 4838 scope.go:117] "RemoveContainer" containerID="c72266b9b0f0228a2212b582e2d53a73166e50af00c9674d71d6a8b1fbb59a1d" Nov 28 10:20:13 crc kubenswrapper[4838]: E1128 10:20:13.276817 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c72266b9b0f0228a2212b582e2d53a73166e50af00c9674d71d6a8b1fbb59a1d\": container with ID starting with c72266b9b0f0228a2212b582e2d53a73166e50af00c9674d71d6a8b1fbb59a1d not found: ID does not exist" containerID="c72266b9b0f0228a2212b582e2d53a73166e50af00c9674d71d6a8b1fbb59a1d" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.276849 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c72266b9b0f0228a2212b582e2d53a73166e50af00c9674d71d6a8b1fbb59a1d"} err="failed to get container status \"c72266b9b0f0228a2212b582e2d53a73166e50af00c9674d71d6a8b1fbb59a1d\": rpc error: code = NotFound desc = could not find container 
\"c72266b9b0f0228a2212b582e2d53a73166e50af00c9674d71d6a8b1fbb59a1d\": container with ID starting with c72266b9b0f0228a2212b582e2d53a73166e50af00c9674d71d6a8b1fbb59a1d not found: ID does not exist" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.320809 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/366c721a-0e79-44a0-aa02-761c4ddc6936-pod-info\") pod \"366c721a-0e79-44a0-aa02-761c4ddc6936\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.320886 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-tls\") pod \"366c721a-0e79-44a0-aa02-761c4ddc6936\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.320911 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4hqzv\" (UniqueName: \"kubernetes.io/projected/366c721a-0e79-44a0-aa02-761c4ddc6936-kube-api-access-4hqzv\") pod \"366c721a-0e79-44a0-aa02-761c4ddc6936\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.320939 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-confd\") pod \"366c721a-0e79-44a0-aa02-761c4ddc6936\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.321015 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-plugins\") pod \"366c721a-0e79-44a0-aa02-761c4ddc6936\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.321145 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/366c721a-0e79-44a0-aa02-761c4ddc6936-config-data\") pod \"366c721a-0e79-44a0-aa02-761c4ddc6936\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.321199 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/366c721a-0e79-44a0-aa02-761c4ddc6936-erlang-cookie-secret\") pod \"366c721a-0e79-44a0-aa02-761c4ddc6936\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.321229 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/366c721a-0e79-44a0-aa02-761c4ddc6936-plugins-conf\") pod \"366c721a-0e79-44a0-aa02-761c4ddc6936\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.321286 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-erlang-cookie\") pod \"366c721a-0e79-44a0-aa02-761c4ddc6936\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.321310 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"server-conf\" (UniqueName: \"kubernetes.io/configmap/366c721a-0e79-44a0-aa02-761c4ddc6936-server-conf\") pod \"366c721a-0e79-44a0-aa02-761c4ddc6936\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.321364 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"366c721a-0e79-44a0-aa02-761c4ddc6936\" (UID: \"366c721a-0e79-44a0-aa02-761c4ddc6936\") " Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.323076 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "366c721a-0e79-44a0-aa02-761c4ddc6936" (UID: "366c721a-0e79-44a0-aa02-761c4ddc6936"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.323210 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/366c721a-0e79-44a0-aa02-761c4ddc6936-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "366c721a-0e79-44a0-aa02-761c4ddc6936" (UID: "366c721a-0e79-44a0-aa02-761c4ddc6936"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.326215 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "366c721a-0e79-44a0-aa02-761c4ddc6936" (UID: "366c721a-0e79-44a0-aa02-761c4ddc6936"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.328274 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/366c721a-0e79-44a0-aa02-761c4ddc6936-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "366c721a-0e79-44a0-aa02-761c4ddc6936" (UID: "366c721a-0e79-44a0-aa02-761c4ddc6936"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.328426 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "persistence") pod "366c721a-0e79-44a0-aa02-761c4ddc6936" (UID: "366c721a-0e79-44a0-aa02-761c4ddc6936"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.335083 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/366c721a-0e79-44a0-aa02-761c4ddc6936-pod-info" (OuterVolumeSpecName: "pod-info") pod "366c721a-0e79-44a0-aa02-761c4ddc6936" (UID: "366c721a-0e79-44a0-aa02-761c4ddc6936"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.335139 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/366c721a-0e79-44a0-aa02-761c4ddc6936-kube-api-access-4hqzv" (OuterVolumeSpecName: "kube-api-access-4hqzv") pod "366c721a-0e79-44a0-aa02-761c4ddc6936" (UID: "366c721a-0e79-44a0-aa02-761c4ddc6936"). 
InnerVolumeSpecName "kube-api-access-4hqzv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.337030 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "366c721a-0e79-44a0-aa02-761c4ddc6936" (UID: "366c721a-0e79-44a0-aa02-761c4ddc6936"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.393954 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/366c721a-0e79-44a0-aa02-761c4ddc6936-config-data" (OuterVolumeSpecName: "config-data") pod "366c721a-0e79-44a0-aa02-761c4ddc6936" (UID: "366c721a-0e79-44a0-aa02-761c4ddc6936"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.413975 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/366c721a-0e79-44a0-aa02-761c4ddc6936-server-conf" (OuterVolumeSpecName: "server-conf") pod "366c721a-0e79-44a0-aa02-761c4ddc6936" (UID: "366c721a-0e79-44a0-aa02-761c4ddc6936"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.426038 4838 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.426073 4838 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/366c721a-0e79-44a0-aa02-761c4ddc6936-pod-info\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.426086 4838 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.426129 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4hqzv\" (UniqueName: \"kubernetes.io/projected/366c721a-0e79-44a0-aa02-761c4ddc6936-kube-api-access-4hqzv\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.426143 4838 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.426154 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/366c721a-0e79-44a0-aa02-761c4ddc6936-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.426166 4838 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/366c721a-0e79-44a0-aa02-761c4ddc6936-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.426178 4838 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/366c721a-0e79-44a0-aa02-761c4ddc6936-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:13 
crc kubenswrapper[4838]: I1128 10:20:13.426190 4838 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.426201 4838 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/366c721a-0e79-44a0-aa02-761c4ddc6936-server-conf\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.444369 4838 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.448608 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "366c721a-0e79-44a0-aa02-761c4ddc6936" (UID: "366c721a-0e79-44a0-aa02-761c4ddc6936"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.527750 4838 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:13 crc kubenswrapper[4838]: I1128 10:20:13.527787 4838 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/366c721a-0e79-44a0-aa02-761c4ddc6936-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.206060 4838 generic.go:334] "Generic (PLEG): container finished" podID="dbe39b78-c198-480e-9bca-17eaed6183bf" containerID="14999f81bcbe0f31824a6a12d3130d4e3af12b8cc9f8f6f76e3d05268e066714" exitCode=0 Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.206447 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"dbe39b78-c198-480e-9bca-17eaed6183bf","Type":"ContainerDied","Data":"14999f81bcbe0f31824a6a12d3130d4e3af12b8cc9f8f6f76e3d05268e066714"} Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.208052 4838 util.go:48] "No ready sandbox for pod can be found. 
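The teardown above follows a fixed two-phase order: every volume of pod 366c721a gets a per-pod UnmountVolume.TearDown, and only the device-backed "persistence" volume (local-storage06-crc) then gets a second, node-scoped UnmountDevice once no pod references it. A toy Go sketch of that ordering, with illustrative names only (not the kubelet's reconciler):

package main

import "fmt"

type volume struct {
	name          string
	deviceMounted bool // e.g. a local PV with a node-level device mount
}

// teardownPod mirrors the log's order: per-pod teardown first,
// node-level device unmount second, and only where needed.
func teardownPod(vols []volume) {
	for _, v := range vols {
		fmt.Printf("UnmountVolume.TearDown succeeded for %q\n", v.name)
	}
	for _, v := range vols {
		if v.deviceMounted {
			fmt.Printf("UnmountDevice succeeded for %q\n", v.name)
		}
	}
}

func main() {
	teardownPod([]volume{
		{name: "rabbitmq-plugins"},
		{name: "persistence", deviceMounted: true},
	})
}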
Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.261865 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.288848 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.336263 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 28 10:20:14 crc kubenswrapper[4838]: E1128 10:20:14.336809 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="366c721a-0e79-44a0-aa02-761c4ddc6936" containerName="rabbitmq"
Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.336835 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="366c721a-0e79-44a0-aa02-761c4ddc6936" containerName="rabbitmq"
Nov 28 10:20:14 crc kubenswrapper[4838]: E1128 10:20:14.336876 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="366c721a-0e79-44a0-aa02-761c4ddc6936" containerName="setup-container"
Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.336885 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="366c721a-0e79-44a0-aa02-761c4ddc6936" containerName="setup-container"
Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.337111 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="366c721a-0e79-44a0-aa02-761c4ddc6936" containerName="rabbitmq"
Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.343055 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.345323 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.345502 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.345536 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.345567 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.345703 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.345753 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.347423 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-x97jh"
Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.354992 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.443892 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/969e66ab-e24e-4a63-9543-8214980ccbe3-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0"
Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.443931 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/969e66ab-e24e-4a63-9543-8214980ccbe3-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0"
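The SyncLoop DELETE/REMOVE/ADD triple above is a StatefulSet pod being replaced: the name stays rabbitmq-server-0 while the UID changes from 366c721a-... to 969e66ab-..., which is why cpu_manager and memory_manager discard stale per-UID state. A minimal client-go sketch (assuming in-cluster credentials; purely illustrative) that watches this one pod name and makes the UID change visible:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes the program runs in a pod
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	w, err := cs.CoreV1().Pods("openstack").Watch(context.Background(),
		metav1.ListOptions{FieldSelector: "metadata.name=rabbitmq-server-0"})
	if err != nil {
		panic(err)
	}
	for ev := range w.ResultChan() {
		if pod, ok := ev.Object.(*corev1.Pod); ok {
			// Across a replacement the name repeats but the UID changes.
			fmt.Printf("%s name=%s uid=%s\n", ev.Type, pod.Name, pod.UID)
		}
	}
}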
\"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/969e66ab-e24e-4a63-9543-8214980ccbe3-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.443955 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/969e66ab-e24e-4a63-9543-8214980ccbe3-pod-info\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.444064 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/969e66ab-e24e-4a63-9543-8214980ccbe3-server-conf\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.444126 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/969e66ab-e24e-4a63-9543-8214980ccbe3-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.444168 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/969e66ab-e24e-4a63-9543-8214980ccbe3-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.444232 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/969e66ab-e24e-4a63-9543-8214980ccbe3-config-data\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.444287 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69lnq\" (UniqueName: \"kubernetes.io/projected/969e66ab-e24e-4a63-9543-8214980ccbe3-kube-api-access-69lnq\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.444406 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.444442 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/969e66ab-e24e-4a63-9543-8214980ccbe3-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.444489 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/969e66ab-e24e-4a63-9543-8214980ccbe3-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.447478 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.549576 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/dbe39b78-c198-480e-9bca-17eaed6183bf-plugins-conf\") pod \"dbe39b78-c198-480e-9bca-17eaed6183bf\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.549671 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/dbe39b78-c198-480e-9bca-17eaed6183bf-pod-info\") pod \"dbe39b78-c198-480e-9bca-17eaed6183bf\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.549710 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-plugins\") pod \"dbe39b78-c198-480e-9bca-17eaed6183bf\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.549782 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-tls\") pod \"dbe39b78-c198-480e-9bca-17eaed6183bf\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.549802 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"dbe39b78-c198-480e-9bca-17eaed6183bf\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.549843 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dbe39b78-c198-480e-9bca-17eaed6183bf-config-data\") pod \"dbe39b78-c198-480e-9bca-17eaed6183bf\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.549864 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-confd\") pod \"dbe39b78-c198-480e-9bca-17eaed6183bf\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.549880 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/dbe39b78-c198-480e-9bca-17eaed6183bf-erlang-cookie-secret\") pod \"dbe39b78-c198-480e-9bca-17eaed6183bf\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.549902 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-skjvs\" (UniqueName: \"kubernetes.io/projected/dbe39b78-c198-480e-9bca-17eaed6183bf-kube-api-access-skjvs\") pod \"dbe39b78-c198-480e-9bca-17eaed6183bf\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " Nov 28 10:20:14 crc 
kubenswrapper[4838]: I1128 10:20:14.549971 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/dbe39b78-c198-480e-9bca-17eaed6183bf-server-conf\") pod \"dbe39b78-c198-480e-9bca-17eaed6183bf\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.550015 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-erlang-cookie\") pod \"dbe39b78-c198-480e-9bca-17eaed6183bf\" (UID: \"dbe39b78-c198-480e-9bca-17eaed6183bf\") " Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.550745 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/969e66ab-e24e-4a63-9543-8214980ccbe3-server-conf\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.550785 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/969e66ab-e24e-4a63-9543-8214980ccbe3-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.550812 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/969e66ab-e24e-4a63-9543-8214980ccbe3-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.550850 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/969e66ab-e24e-4a63-9543-8214980ccbe3-config-data\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.550883 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69lnq\" (UniqueName: \"kubernetes.io/projected/969e66ab-e24e-4a63-9543-8214980ccbe3-kube-api-access-69lnq\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.550902 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "dbe39b78-c198-480e-9bca-17eaed6183bf" (UID: "dbe39b78-c198-480e-9bca-17eaed6183bf"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.550752 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbe39b78-c198-480e-9bca-17eaed6183bf-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "dbe39b78-c198-480e-9bca-17eaed6183bf" (UID: "dbe39b78-c198-480e-9bca-17eaed6183bf"). InnerVolumeSpecName "plugins-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.550973 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.550997 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/969e66ab-e24e-4a63-9543-8214980ccbe3-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.551045 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/969e66ab-e24e-4a63-9543-8214980ccbe3-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.551109 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/969e66ab-e24e-4a63-9543-8214980ccbe3-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.551131 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/969e66ab-e24e-4a63-9543-8214980ccbe3-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.551160 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/969e66ab-e24e-4a63-9543-8214980ccbe3-pod-info\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.551235 4838 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/dbe39b78-c198-480e-9bca-17eaed6183bf-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.551247 4838 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.551298 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "dbe39b78-c198-480e-9bca-17eaed6183bf" (UID: "dbe39b78-c198-480e-9bca-17eaed6183bf"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.551981 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/969e66ab-e24e-4a63-9543-8214980ccbe3-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.552262 4838 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.552498 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/969e66ab-e24e-4a63-9543-8214980ccbe3-server-conf\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.558222 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "dbe39b78-c198-480e-9bca-17eaed6183bf" (UID: "dbe39b78-c198-480e-9bca-17eaed6183bf"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.559027 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/969e66ab-e24e-4a63-9543-8214980ccbe3-pod-info\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.559337 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/969e66ab-e24e-4a63-9543-8214980ccbe3-config-data\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.559027 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/dbe39b78-c198-480e-9bca-17eaed6183bf-pod-info" (OuterVolumeSpecName: "pod-info") pod "dbe39b78-c198-480e-9bca-17eaed6183bf" (UID: "dbe39b78-c198-480e-9bca-17eaed6183bf"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.559665 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/969e66ab-e24e-4a63-9543-8214980ccbe3-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.560234 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbe39b78-c198-480e-9bca-17eaed6183bf-kube-api-access-skjvs" (OuterVolumeSpecName: "kube-api-access-skjvs") pod "dbe39b78-c198-480e-9bca-17eaed6183bf" (UID: "dbe39b78-c198-480e-9bca-17eaed6183bf"). InnerVolumeSpecName "kube-api-access-skjvs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.560278 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/969e66ab-e24e-4a63-9543-8214980ccbe3-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.562167 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "dbe39b78-c198-480e-9bca-17eaed6183bf" (UID: "dbe39b78-c198-480e-9bca-17eaed6183bf"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.563329 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/969e66ab-e24e-4a63-9543-8214980ccbe3-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.563998 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/969e66ab-e24e-4a63-9543-8214980ccbe3-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.578753 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/969e66ab-e24e-4a63-9543-8214980ccbe3-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.585023 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbe39b78-c198-480e-9bca-17eaed6183bf-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "dbe39b78-c198-480e-9bca-17eaed6183bf" (UID: "dbe39b78-c198-480e-9bca-17eaed6183bf"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.585864 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69lnq\" (UniqueName: \"kubernetes.io/projected/969e66ab-e24e-4a63-9543-8214980ccbe3-kube-api-access-69lnq\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.588731 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="366c721a-0e79-44a0-aa02-761c4ddc6936" path="/var/lib/kubelet/pods/366c721a-0e79-44a0-aa02-761c4ddc6936/volumes" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.598892 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbe39b78-c198-480e-9bca-17eaed6183bf-config-data" (OuterVolumeSpecName: "config-data") pod "dbe39b78-c198-480e-9bca-17eaed6183bf" (UID: "dbe39b78-c198-480e-9bca-17eaed6183bf"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.610164 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"969e66ab-e24e-4a63-9543-8214980ccbe3\") " pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.643650 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbe39b78-c198-480e-9bca-17eaed6183bf-server-conf" (OuterVolumeSpecName: "server-conf") pod "dbe39b78-c198-480e-9bca-17eaed6183bf" (UID: "dbe39b78-c198-480e-9bca-17eaed6183bf"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.661399 4838 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/dbe39b78-c198-480e-9bca-17eaed6183bf-pod-info\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.661432 4838 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.661463 4838 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.661476 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dbe39b78-c198-480e-9bca-17eaed6183bf-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.661508 4838 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/dbe39b78-c198-480e-9bca-17eaed6183bf-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.661522 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-skjvs\" (UniqueName: \"kubernetes.io/projected/dbe39b78-c198-480e-9bca-17eaed6183bf-kube-api-access-skjvs\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.661534 4838 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/dbe39b78-c198-480e-9bca-17eaed6183bf-server-conf\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.661547 4838 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.672845 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "dbe39b78-c198-480e-9bca-17eaed6183bf" (UID: "dbe39b78-c198-480e-9bca-17eaed6183bf"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.684502 4838 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.744444 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.763809 4838 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:14 crc kubenswrapper[4838]: I1128 10:20:14.763868 4838 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/dbe39b78-c198-480e-9bca-17eaed6183bf-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.058372 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.224068 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"dbe39b78-c198-480e-9bca-17eaed6183bf","Type":"ContainerDied","Data":"d0b19a4e964ccfcc6a584d94a03552ce077687841c08299a7d1d4c655f6ca93b"} Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.224124 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.224146 4838 scope.go:117] "RemoveContainer" containerID="14999f81bcbe0f31824a6a12d3130d4e3af12b8cc9f8f6f76e3d05268e066714" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.229508 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"969e66ab-e24e-4a63-9543-8214980ccbe3","Type":"ContainerStarted","Data":"9518382b93b9f0d5376fc673ede990f0b50a8e9f9e5fbff8f6c65a9ff30910f1"} Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.256325 4838 scope.go:117] "RemoveContainer" containerID="4556fc74aa9f704938030bc686cf395afad6eac12531f3c48a0fbaeeaf2d8910" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.262858 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.275329 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.295801 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 10:20:15 crc kubenswrapper[4838]: E1128 10:20:15.296658 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbe39b78-c198-480e-9bca-17eaed6183bf" containerName="setup-container" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.296680 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbe39b78-c198-480e-9bca-17eaed6183bf" containerName="setup-container" Nov 28 10:20:15 crc kubenswrapper[4838]: E1128 10:20:15.296702 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbe39b78-c198-480e-9bca-17eaed6183bf" containerName="rabbitmq" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.296713 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbe39b78-c198-480e-9bca-17eaed6183bf" containerName="rabbitmq" Nov 28 10:20:15 crc 
kubenswrapper[4838]: I1128 10:20:15.297015 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbe39b78-c198-480e-9bca-17eaed6183bf" containerName="rabbitmq" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.322776 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.332869 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.333017 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.333344 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.334461 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.338609 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-87q8x" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.338913 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.339170 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.339383 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.382124 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/091769fb-bf67-454a-b0da-3e33589799f9-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.382191 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/091769fb-bf67-454a-b0da-3e33589799f9-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.382277 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/091769fb-bf67-454a-b0da-3e33589799f9-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.382363 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qm8bg\" (UniqueName: \"kubernetes.io/projected/091769fb-bf67-454a-b0da-3e33589799f9-kube-api-access-qm8bg\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.382405 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: 
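The "Caches populated" lines above are the kubelet's reflectors warming per-object Secret/ConfigMap caches before it mounts the new cell1 pod's volumes. A minimal client-go sketch of the same populate-then-wait pattern using a shared informer factory (in-cluster config and the "openstack" namespace are assumptions from this log):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	factory := informers.NewSharedInformerFactoryWithOptions(cs, 10*time.Minute,
		informers.WithNamespace("openstack"))
	cmInformer := factory.Core().V1().ConfigMaps().Informer()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	factory.Start(ctx.Done())
	// Block until the initial LIST has landed in the local cache,
	// the moment the kubelet reports as "Caches populated".
	if !cache.WaitForCacheSync(ctx.Done(), cmInformer.HasSynced) {
		panic("ConfigMap cache never synced")
	}
	fmt.Println("ConfigMap cache populated")
}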
\"kubernetes.io/configmap/091769fb-bf67-454a-b0da-3e33589799f9-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.382444 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/091769fb-bf67-454a-b0da-3e33589799f9-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.382482 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.382503 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/091769fb-bf67-454a-b0da-3e33589799f9-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.382529 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/091769fb-bf67-454a-b0da-3e33589799f9-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.382551 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/091769fb-bf67-454a-b0da-3e33589799f9-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.382579 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/091769fb-bf67-454a-b0da-3e33589799f9-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.483377 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qm8bg\" (UniqueName: \"kubernetes.io/projected/091769fb-bf67-454a-b0da-3e33589799f9-kube-api-access-qm8bg\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.483460 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/091769fb-bf67-454a-b0da-3e33589799f9-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.483510 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/091769fb-bf67-454a-b0da-3e33589799f9-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.483540 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.483559 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/091769fb-bf67-454a-b0da-3e33589799f9-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.483577 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/091769fb-bf67-454a-b0da-3e33589799f9-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.483614 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/091769fb-bf67-454a-b0da-3e33589799f9-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.483636 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/091769fb-bf67-454a-b0da-3e33589799f9-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.483699 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/091769fb-bf67-454a-b0da-3e33589799f9-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.483758 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/091769fb-bf67-454a-b0da-3e33589799f9-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.483829 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/091769fb-bf67-454a-b0da-3e33589799f9-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.484225 4838 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") device mount path 
\"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.484937 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/091769fb-bf67-454a-b0da-3e33589799f9-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.485173 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/091769fb-bf67-454a-b0da-3e33589799f9-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.485282 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/091769fb-bf67-454a-b0da-3e33589799f9-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.485983 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/091769fb-bf67-454a-b0da-3e33589799f9-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.486261 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/091769fb-bf67-454a-b0da-3e33589799f9-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.487708 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/091769fb-bf67-454a-b0da-3e33589799f9-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.488478 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/091769fb-bf67-454a-b0da-3e33589799f9-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.489113 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/091769fb-bf67-454a-b0da-3e33589799f9-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.489342 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/091769fb-bf67-454a-b0da-3e33589799f9-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.501776 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-qm8bg\" (UniqueName: \"kubernetes.io/projected/091769fb-bf67-454a-b0da-3e33589799f9-kube-api-access-qm8bg\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.512598 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"091769fb-bf67-454a-b0da-3e33589799f9\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.669824 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:15 crc kubenswrapper[4838]: I1128 10:20:15.942705 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 10:20:16 crc kubenswrapper[4838]: W1128 10:20:16.034937 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod091769fb_bf67_454a_b0da_3e33589799f9.slice/crio-0b0dc2720d8b6b4663527fe228f8f9aa0bf0bb4ca586bbf8cbaba5fee416a6ad WatchSource:0}: Error finding container 0b0dc2720d8b6b4663527fe228f8f9aa0bf0bb4ca586bbf8cbaba5fee416a6ad: Status 404 returned error can't find the container with id 0b0dc2720d8b6b4663527fe228f8f9aa0bf0bb4ca586bbf8cbaba5fee416a6ad Nov 28 10:20:16 crc kubenswrapper[4838]: I1128 10:20:16.244651 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"091769fb-bf67-454a-b0da-3e33589799f9","Type":"ContainerStarted","Data":"0b0dc2720d8b6b4663527fe228f8f9aa0bf0bb4ca586bbf8cbaba5fee416a6ad"} Nov 28 10:20:16 crc kubenswrapper[4838]: I1128 10:20:16.586461 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbe39b78-c198-480e-9bca-17eaed6183bf" path="/var/lib/kubelet/pods/dbe39b78-c198-480e-9bca-17eaed6183bf/volumes" Nov 28 10:20:17 crc kubenswrapper[4838]: I1128 10:20:17.258748 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"969e66ab-e24e-4a63-9543-8214980ccbe3","Type":"ContainerStarted","Data":"0cf28d03b4fa35c112fe6f96d4e4f78d618d4f146ee2e52d121bcfae34610cd9"} Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.112233 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-8sr92"] Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.114904 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.117938 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.142135 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-8sr92"] Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.305287 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-openstack-edpm-ipam\") pod \"dnsmasq-dns-6447ccbd8f-8sr92\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.305362 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-ovsdbserver-sb\") pod \"dnsmasq-dns-6447ccbd8f-8sr92\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.305424 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wxsw\" (UniqueName: \"kubernetes.io/projected/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-kube-api-access-7wxsw\") pod \"dnsmasq-dns-6447ccbd8f-8sr92\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.305469 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-ovsdbserver-nb\") pod \"dnsmasq-dns-6447ccbd8f-8sr92\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.305490 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-config\") pod \"dnsmasq-dns-6447ccbd8f-8sr92\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.305519 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-dns-svc\") pod \"dnsmasq-dns-6447ccbd8f-8sr92\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.406821 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-openstack-edpm-ipam\") pod \"dnsmasq-dns-6447ccbd8f-8sr92\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.406902 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-ovsdbserver-sb\") pod \"dnsmasq-dns-6447ccbd8f-8sr92\" (UID: 
\"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.406969 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wxsw\" (UniqueName: \"kubernetes.io/projected/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-kube-api-access-7wxsw\") pod \"dnsmasq-dns-6447ccbd8f-8sr92\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.406987 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-ovsdbserver-nb\") pod \"dnsmasq-dns-6447ccbd8f-8sr92\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.407003 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-config\") pod \"dnsmasq-dns-6447ccbd8f-8sr92\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.407017 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-dns-svc\") pod \"dnsmasq-dns-6447ccbd8f-8sr92\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.408032 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-openstack-edpm-ipam\") pod \"dnsmasq-dns-6447ccbd8f-8sr92\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.408618 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-ovsdbserver-nb\") pod \"dnsmasq-dns-6447ccbd8f-8sr92\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.409028 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-ovsdbserver-sb\") pod \"dnsmasq-dns-6447ccbd8f-8sr92\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.409113 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-dns-svc\") pod \"dnsmasq-dns-6447ccbd8f-8sr92\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.409913 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-config\") pod \"dnsmasq-dns-6447ccbd8f-8sr92\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:18 crc kubenswrapper[4838]: 
I1128 10:20:18.436257 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7wxsw\" (UniqueName: \"kubernetes.io/projected/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-kube-api-access-7wxsw\") pod \"dnsmasq-dns-6447ccbd8f-8sr92\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.449964 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:18 crc kubenswrapper[4838]: W1128 10:20:18.808800 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poded3a711b_2077_4680_9e4a_7e9100ed4bd9.slice/crio-61b9dedf1765d989ef8c58d3d6c01d55a3f5b6fec5d3e5a2a8721512c2780d80 WatchSource:0}: Error finding container 61b9dedf1765d989ef8c58d3d6c01d55a3f5b6fec5d3e5a2a8721512c2780d80: Status 404 returned error can't find the container with id 61b9dedf1765d989ef8c58d3d6c01d55a3f5b6fec5d3e5a2a8721512c2780d80 Nov 28 10:20:18 crc kubenswrapper[4838]: I1128 10:20:18.814069 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-8sr92"] Nov 28 10:20:19 crc kubenswrapper[4838]: I1128 10:20:19.278193 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" event={"ID":"ed3a711b-2077-4680-9e4a-7e9100ed4bd9","Type":"ContainerStarted","Data":"61b9dedf1765d989ef8c58d3d6c01d55a3f5b6fec5d3e5a2a8721512c2780d80"} Nov 28 10:20:19 crc kubenswrapper[4838]: I1128 10:20:19.280334 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"091769fb-bf67-454a-b0da-3e33589799f9","Type":"ContainerStarted","Data":"2872d73efb485ffc8d941ee6987506dbdb8ad26811667d69461a20762ee7ca2a"} Nov 28 10:20:20 crc kubenswrapper[4838]: I1128 10:20:20.294282 4838 generic.go:334] "Generic (PLEG): container finished" podID="ed3a711b-2077-4680-9e4a-7e9100ed4bd9" containerID="658bc2e96f46f9c978fedbabf11e046a68e79be2c74c5432eac7657449128e4f" exitCode=0 Nov 28 10:20:20 crc kubenswrapper[4838]: I1128 10:20:20.294426 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" event={"ID":"ed3a711b-2077-4680-9e4a-7e9100ed4bd9","Type":"ContainerDied","Data":"658bc2e96f46f9c978fedbabf11e046a68e79be2c74c5432eac7657449128e4f"} Nov 28 10:20:21 crc kubenswrapper[4838]: I1128 10:20:21.308145 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" event={"ID":"ed3a711b-2077-4680-9e4a-7e9100ed4bd9","Type":"ContainerStarted","Data":"ae8b495b45c579b904040c9f3fffcafbbed7dd2400fbcff588c207593b46f56c"} Nov 28 10:20:21 crc kubenswrapper[4838]: I1128 10:20:21.308526 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:21 crc kubenswrapper[4838]: I1128 10:20:21.348362 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" podStartSLOduration=3.348335677 podStartE2EDuration="3.348335677s" podCreationTimestamp="2025-11-28 10:20:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:20:21.340421258 +0000 UTC m=+1393.039395498" watchObservedRunningTime="2025-11-28 10:20:21.348335677 +0000 UTC m=+1393.047309877" Nov 28 10:20:26 crc kubenswrapper[4838]: I1128 
10:20:26.223809 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rnlcp"] Nov 28 10:20:26 crc kubenswrapper[4838]: I1128 10:20:26.228349 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rnlcp" Nov 28 10:20:26 crc kubenswrapper[4838]: I1128 10:20:26.241445 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rnlcp"] Nov 28 10:20:26 crc kubenswrapper[4838]: I1128 10:20:26.324232 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b3677e0-92bc-462b-a9b2-bb0b199bd116-catalog-content\") pod \"redhat-operators-rnlcp\" (UID: \"0b3677e0-92bc-462b-a9b2-bb0b199bd116\") " pod="openshift-marketplace/redhat-operators-rnlcp" Nov 28 10:20:26 crc kubenswrapper[4838]: I1128 10:20:26.324660 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b3677e0-92bc-462b-a9b2-bb0b199bd116-utilities\") pod \"redhat-operators-rnlcp\" (UID: \"0b3677e0-92bc-462b-a9b2-bb0b199bd116\") " pod="openshift-marketplace/redhat-operators-rnlcp" Nov 28 10:20:26 crc kubenswrapper[4838]: I1128 10:20:26.324743 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fv48\" (UniqueName: \"kubernetes.io/projected/0b3677e0-92bc-462b-a9b2-bb0b199bd116-kube-api-access-2fv48\") pod \"redhat-operators-rnlcp\" (UID: \"0b3677e0-92bc-462b-a9b2-bb0b199bd116\") " pod="openshift-marketplace/redhat-operators-rnlcp" Nov 28 10:20:26 crc kubenswrapper[4838]: I1128 10:20:26.444242 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b3677e0-92bc-462b-a9b2-bb0b199bd116-catalog-content\") pod \"redhat-operators-rnlcp\" (UID: \"0b3677e0-92bc-462b-a9b2-bb0b199bd116\") " pod="openshift-marketplace/redhat-operators-rnlcp" Nov 28 10:20:26 crc kubenswrapper[4838]: I1128 10:20:26.444617 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b3677e0-92bc-462b-a9b2-bb0b199bd116-utilities\") pod \"redhat-operators-rnlcp\" (UID: \"0b3677e0-92bc-462b-a9b2-bb0b199bd116\") " pod="openshift-marketplace/redhat-operators-rnlcp" Nov 28 10:20:26 crc kubenswrapper[4838]: I1128 10:20:26.444777 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fv48\" (UniqueName: \"kubernetes.io/projected/0b3677e0-92bc-462b-a9b2-bb0b199bd116-kube-api-access-2fv48\") pod \"redhat-operators-rnlcp\" (UID: \"0b3677e0-92bc-462b-a9b2-bb0b199bd116\") " pod="openshift-marketplace/redhat-operators-rnlcp" Nov 28 10:20:26 crc kubenswrapper[4838]: I1128 10:20:26.446494 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b3677e0-92bc-462b-a9b2-bb0b199bd116-utilities\") pod \"redhat-operators-rnlcp\" (UID: \"0b3677e0-92bc-462b-a9b2-bb0b199bd116\") " pod="openshift-marketplace/redhat-operators-rnlcp" Nov 28 10:20:26 crc kubenswrapper[4838]: I1128 10:20:26.446465 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b3677e0-92bc-462b-a9b2-bb0b199bd116-catalog-content\") pod \"redhat-operators-rnlcp\" (UID: 
\"0b3677e0-92bc-462b-a9b2-bb0b199bd116\") " pod="openshift-marketplace/redhat-operators-rnlcp" Nov 28 10:20:26 crc kubenswrapper[4838]: I1128 10:20:26.468684 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fv48\" (UniqueName: \"kubernetes.io/projected/0b3677e0-92bc-462b-a9b2-bb0b199bd116-kube-api-access-2fv48\") pod \"redhat-operators-rnlcp\" (UID: \"0b3677e0-92bc-462b-a9b2-bb0b199bd116\") " pod="openshift-marketplace/redhat-operators-rnlcp" Nov 28 10:20:26 crc kubenswrapper[4838]: I1128 10:20:26.555888 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rnlcp" Nov 28 10:20:27 crc kubenswrapper[4838]: I1128 10:20:27.054770 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rnlcp"] Nov 28 10:20:27 crc kubenswrapper[4838]: I1128 10:20:27.392034 4838 generic.go:334] "Generic (PLEG): container finished" podID="0b3677e0-92bc-462b-a9b2-bb0b199bd116" containerID="ba9016084e12d24b89946c050f6dd42e636f099c657b6e2d070901544d996173" exitCode=0 Nov 28 10:20:27 crc kubenswrapper[4838]: I1128 10:20:27.392097 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rnlcp" event={"ID":"0b3677e0-92bc-462b-a9b2-bb0b199bd116","Type":"ContainerDied","Data":"ba9016084e12d24b89946c050f6dd42e636f099c657b6e2d070901544d996173"} Nov 28 10:20:27 crc kubenswrapper[4838]: I1128 10:20:27.393537 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rnlcp" event={"ID":"0b3677e0-92bc-462b-a9b2-bb0b199bd116","Type":"ContainerStarted","Data":"9c3273fa066ac8e3e58d928dea3266697f177e758b90d9d35e64d327e3bed5a0"} Nov 28 10:20:27 crc kubenswrapper[4838]: I1128 10:20:27.394367 4838 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.407495 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rnlcp" event={"ID":"0b3677e0-92bc-462b-a9b2-bb0b199bd116","Type":"ContainerStarted","Data":"ddd1c515aad6669610038a3bbc75b35d5e3c650c560330feb0b65b5b015b3555"} Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.453003 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.543791 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-mjgmt"] Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.544147 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" podUID="d861e633-58c3-4190-b4bf-c113fe415368" containerName="dnsmasq-dns" containerID="cri-o://1dc6b5d8ba0342ebc40a6054a39bb36a6a8085273a805c2e3e99f6f4d39ace9a" gracePeriod=10 Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.685182 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" podUID="d861e633-58c3-4190-b4bf-c113fe415368" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.183:5353: connect: connection refused" Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.711870 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-864d5fc68c-z59nl"] Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.718528 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.726820 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-864d5fc68c-z59nl"] Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.793846 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-dns-svc\") pod \"dnsmasq-dns-864d5fc68c-z59nl\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") " pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.793898 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-ovsdbserver-sb\") pod \"dnsmasq-dns-864d5fc68c-z59nl\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") " pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.793982 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sn2p2\" (UniqueName: \"kubernetes.io/projected/cb064300-abd5-4c16-ab31-cf82ff261ac8-kube-api-access-sn2p2\") pod \"dnsmasq-dns-864d5fc68c-z59nl\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") " pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.794108 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-openstack-edpm-ipam\") pod \"dnsmasq-dns-864d5fc68c-z59nl\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") " pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.794273 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-config\") pod \"dnsmasq-dns-864d5fc68c-z59nl\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") " pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.794339 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-ovsdbserver-nb\") pod \"dnsmasq-dns-864d5fc68c-z59nl\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") " pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.898917 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-dns-svc\") pod \"dnsmasq-dns-864d5fc68c-z59nl\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") " pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.899006 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-ovsdbserver-sb\") pod \"dnsmasq-dns-864d5fc68c-z59nl\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") " pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.899092 4838 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-sn2p2\" (UniqueName: \"kubernetes.io/projected/cb064300-abd5-4c16-ab31-cf82ff261ac8-kube-api-access-sn2p2\") pod \"dnsmasq-dns-864d5fc68c-z59nl\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") " pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.899164 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-openstack-edpm-ipam\") pod \"dnsmasq-dns-864d5fc68c-z59nl\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") " pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.899213 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-config\") pod \"dnsmasq-dns-864d5fc68c-z59nl\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") " pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.899245 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-ovsdbserver-nb\") pod \"dnsmasq-dns-864d5fc68c-z59nl\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") " pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.900188 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-ovsdbserver-nb\") pod \"dnsmasq-dns-864d5fc68c-z59nl\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") " pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.903571 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-dns-svc\") pod \"dnsmasq-dns-864d5fc68c-z59nl\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") " pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.903855 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-openstack-edpm-ipam\") pod \"dnsmasq-dns-864d5fc68c-z59nl\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") " pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.904086 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-config\") pod \"dnsmasq-dns-864d5fc68c-z59nl\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") " pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.905093 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-ovsdbserver-sb\") pod \"dnsmasq-dns-864d5fc68c-z59nl\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") " pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" Nov 28 10:20:28 crc kubenswrapper[4838]: I1128 10:20:28.929526 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sn2p2\" (UniqueName: 
\"kubernetes.io/projected/cb064300-abd5-4c16-ab31-cf82ff261ac8-kube-api-access-sn2p2\") pod \"dnsmasq-dns-864d5fc68c-z59nl\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") " pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.067771 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.144948 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.317796 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-config\") pod \"d861e633-58c3-4190-b4bf-c113fe415368\" (UID: \"d861e633-58c3-4190-b4bf-c113fe415368\") " Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.318199 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-dns-svc\") pod \"d861e633-58c3-4190-b4bf-c113fe415368\" (UID: \"d861e633-58c3-4190-b4bf-c113fe415368\") " Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.318260 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-ovsdbserver-nb\") pod \"d861e633-58c3-4190-b4bf-c113fe415368\" (UID: \"d861e633-58c3-4190-b4bf-c113fe415368\") " Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.318290 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-ovsdbserver-sb\") pod \"d861e633-58c3-4190-b4bf-c113fe415368\" (UID: \"d861e633-58c3-4190-b4bf-c113fe415368\") " Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.318357 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pfm6l\" (UniqueName: \"kubernetes.io/projected/d861e633-58c3-4190-b4bf-c113fe415368-kube-api-access-pfm6l\") pod \"d861e633-58c3-4190-b4bf-c113fe415368\" (UID: \"d861e633-58c3-4190-b4bf-c113fe415368\") " Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.323074 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d861e633-58c3-4190-b4bf-c113fe415368-kube-api-access-pfm6l" (OuterVolumeSpecName: "kube-api-access-pfm6l") pod "d861e633-58c3-4190-b4bf-c113fe415368" (UID: "d861e633-58c3-4190-b4bf-c113fe415368"). InnerVolumeSpecName "kube-api-access-pfm6l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.373422 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d861e633-58c3-4190-b4bf-c113fe415368" (UID: "d861e633-58c3-4190-b4bf-c113fe415368"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.389045 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-config" (OuterVolumeSpecName: "config") pod "d861e633-58c3-4190-b4bf-c113fe415368" (UID: "d861e633-58c3-4190-b4bf-c113fe415368"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.391334 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d861e633-58c3-4190-b4bf-c113fe415368" (UID: "d861e633-58c3-4190-b4bf-c113fe415368"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.402309 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d861e633-58c3-4190-b4bf-c113fe415368" (UID: "d861e633-58c3-4190-b4bf-c113fe415368"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.421955 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pfm6l\" (UniqueName: \"kubernetes.io/projected/d861e633-58c3-4190-b4bf-c113fe415368-kube-api-access-pfm6l\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.421980 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.421990 4838 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.422001 4838 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.422009 4838 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d861e633-58c3-4190-b4bf-c113fe415368-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.422211 4838 generic.go:334] "Generic (PLEG): container finished" podID="d861e633-58c3-4190-b4bf-c113fe415368" containerID="1dc6b5d8ba0342ebc40a6054a39bb36a6a8085273a805c2e3e99f6f4d39ace9a" exitCode=0 Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.422275 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" event={"ID":"d861e633-58c3-4190-b4bf-c113fe415368","Type":"ContainerDied","Data":"1dc6b5d8ba0342ebc40a6054a39bb36a6a8085273a805c2e3e99f6f4d39ace9a"} Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.422293 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.422309 4838 scope.go:117] "RemoveContainer" containerID="1dc6b5d8ba0342ebc40a6054a39bb36a6a8085273a805c2e3e99f6f4d39ace9a" Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.422299 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b856c5697-mjgmt" event={"ID":"d861e633-58c3-4190-b4bf-c113fe415368","Type":"ContainerDied","Data":"e0a84c13b189d18971b400bda7cd3d54d2e7b411085a22568684fea3e9aef536"} Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.432451 4838 generic.go:334] "Generic (PLEG): container finished" podID="0b3677e0-92bc-462b-a9b2-bb0b199bd116" containerID="ddd1c515aad6669610038a3bbc75b35d5e3c650c560330feb0b65b5b015b3555" exitCode=0 Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.432485 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rnlcp" event={"ID":"0b3677e0-92bc-462b-a9b2-bb0b199bd116","Type":"ContainerDied","Data":"ddd1c515aad6669610038a3bbc75b35d5e3c650c560330feb0b65b5b015b3555"} Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.477888 4838 scope.go:117] "RemoveContainer" containerID="856d7fb9a5ac41586e57db7f605cd0bfdc2f48f6c0c5e7fa6b0efd53ff6c9a5c" Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.480751 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-mjgmt"] Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.511500 4838 scope.go:117] "RemoveContainer" containerID="1dc6b5d8ba0342ebc40a6054a39bb36a6a8085273a805c2e3e99f6f4d39ace9a" Nov 28 10:20:29 crc kubenswrapper[4838]: E1128 10:20:29.512374 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1dc6b5d8ba0342ebc40a6054a39bb36a6a8085273a805c2e3e99f6f4d39ace9a\": container with ID starting with 1dc6b5d8ba0342ebc40a6054a39bb36a6a8085273a805c2e3e99f6f4d39ace9a not found: ID does not exist" containerID="1dc6b5d8ba0342ebc40a6054a39bb36a6a8085273a805c2e3e99f6f4d39ace9a" Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.512433 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1dc6b5d8ba0342ebc40a6054a39bb36a6a8085273a805c2e3e99f6f4d39ace9a"} err="failed to get container status \"1dc6b5d8ba0342ebc40a6054a39bb36a6a8085273a805c2e3e99f6f4d39ace9a\": rpc error: code = NotFound desc = could not find container \"1dc6b5d8ba0342ebc40a6054a39bb36a6a8085273a805c2e3e99f6f4d39ace9a\": container with ID starting with 1dc6b5d8ba0342ebc40a6054a39bb36a6a8085273a805c2e3e99f6f4d39ace9a not found: ID does not exist" Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.512461 4838 scope.go:117] "RemoveContainer" containerID="856d7fb9a5ac41586e57db7f605cd0bfdc2f48f6c0c5e7fa6b0efd53ff6c9a5c" Nov 28 10:20:29 crc kubenswrapper[4838]: E1128 10:20:29.512762 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"856d7fb9a5ac41586e57db7f605cd0bfdc2f48f6c0c5e7fa6b0efd53ff6c9a5c\": container with ID starting with 856d7fb9a5ac41586e57db7f605cd0bfdc2f48f6c0c5e7fa6b0efd53ff6c9a5c not found: ID does not exist" containerID="856d7fb9a5ac41586e57db7f605cd0bfdc2f48f6c0c5e7fa6b0efd53ff6c9a5c" Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.512787 4838 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"856d7fb9a5ac41586e57db7f605cd0bfdc2f48f6c0c5e7fa6b0efd53ff6c9a5c"} err="failed to get container status \"856d7fb9a5ac41586e57db7f605cd0bfdc2f48f6c0c5e7fa6b0efd53ff6c9a5c\": rpc error: code = NotFound desc = could not find container \"856d7fb9a5ac41586e57db7f605cd0bfdc2f48f6c0c5e7fa6b0efd53ff6c9a5c\": container with ID starting with 856d7fb9a5ac41586e57db7f605cd0bfdc2f48f6c0c5e7fa6b0efd53ff6c9a5c not found: ID does not exist" Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.515546 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b856c5697-mjgmt"] Nov 28 10:20:29 crc kubenswrapper[4838]: I1128 10:20:29.585386 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-864d5fc68c-z59nl"] Nov 28 10:20:29 crc kubenswrapper[4838]: W1128 10:20:29.595128 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcb064300_abd5_4c16_ab31_cf82ff261ac8.slice/crio-42c78ec8530fc50c1b56d2d15be290ca2b82f5a7e0408e19087fe663fdfa3c84 WatchSource:0}: Error finding container 42c78ec8530fc50c1b56d2d15be290ca2b82f5a7e0408e19087fe663fdfa3c84: Status 404 returned error can't find the container with id 42c78ec8530fc50c1b56d2d15be290ca2b82f5a7e0408e19087fe663fdfa3c84 Nov 28 10:20:30 crc kubenswrapper[4838]: I1128 10:20:30.445075 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rnlcp" event={"ID":"0b3677e0-92bc-462b-a9b2-bb0b199bd116","Type":"ContainerStarted","Data":"eb6a6b8e9af301377c09571c6367c5a730d10a8ba67996f9feb8cc7146a47cd6"} Nov 28 10:20:30 crc kubenswrapper[4838]: I1128 10:20:30.447225 4838 generic.go:334] "Generic (PLEG): container finished" podID="cb064300-abd5-4c16-ab31-cf82ff261ac8" containerID="da5267b9bb23fd94888d48e99b1c570ed3913790b5209a85ad4cfafc3c2b287d" exitCode=0 Nov 28 10:20:30 crc kubenswrapper[4838]: I1128 10:20:30.447337 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" event={"ID":"cb064300-abd5-4c16-ab31-cf82ff261ac8","Type":"ContainerDied","Data":"da5267b9bb23fd94888d48e99b1c570ed3913790b5209a85ad4cfafc3c2b287d"} Nov 28 10:20:30 crc kubenswrapper[4838]: I1128 10:20:30.447382 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" event={"ID":"cb064300-abd5-4c16-ab31-cf82ff261ac8","Type":"ContainerStarted","Data":"42c78ec8530fc50c1b56d2d15be290ca2b82f5a7e0408e19087fe663fdfa3c84"} Nov 28 10:20:30 crc kubenswrapper[4838]: I1128 10:20:30.474256 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rnlcp" podStartSLOduration=1.975170008 podStartE2EDuration="4.474237832s" podCreationTimestamp="2025-11-28 10:20:26 +0000 UTC" firstStartedPulling="2025-11-28 10:20:27.394083724 +0000 UTC m=+1399.093057904" lastFinishedPulling="2025-11-28 10:20:29.893151558 +0000 UTC m=+1401.592125728" observedRunningTime="2025-11-28 10:20:30.472135093 +0000 UTC m=+1402.171109273" watchObservedRunningTime="2025-11-28 10:20:30.474237832 +0000 UTC m=+1402.173212002" Nov 28 10:20:30 crc kubenswrapper[4838]: I1128 10:20:30.572628 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d861e633-58c3-4190-b4bf-c113fe415368" path="/var/lib/kubelet/pods/d861e633-58c3-4190-b4bf-c113fe415368/volumes" Nov 28 10:20:31 crc kubenswrapper[4838]: I1128 10:20:31.465905 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" event={"ID":"cb064300-abd5-4c16-ab31-cf82ff261ac8","Type":"ContainerStarted","Data":"96050f858f233fd7f06c24d049ac366b4bc0d4bec0dd8b61fec920eb960e234a"} Nov 28 10:20:31 crc kubenswrapper[4838]: I1128 10:20:31.466364 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" Nov 28 10:20:31 crc kubenswrapper[4838]: I1128 10:20:31.496593 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" podStartSLOduration=3.496563048 podStartE2EDuration="3.496563048s" podCreationTimestamp="2025-11-28 10:20:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:20:31.484318399 +0000 UTC m=+1403.183292569" watchObservedRunningTime="2025-11-28 10:20:31.496563048 +0000 UTC m=+1403.195537258" Nov 28 10:20:36 crc kubenswrapper[4838]: I1128 10:20:36.556562 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rnlcp" Nov 28 10:20:36 crc kubenswrapper[4838]: I1128 10:20:36.557233 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rnlcp" Nov 28 10:20:37 crc kubenswrapper[4838]: I1128 10:20:37.631660 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-rnlcp" podUID="0b3677e0-92bc-462b-a9b2-bb0b199bd116" containerName="registry-server" probeResult="failure" output=< Nov 28 10:20:37 crc kubenswrapper[4838]: timeout: failed to connect service ":50051" within 1s Nov 28 10:20:37 crc kubenswrapper[4838]: > Nov 28 10:20:39 crc kubenswrapper[4838]: I1128 10:20:39.070004 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" Nov 28 10:20:39 crc kubenswrapper[4838]: I1128 10:20:39.179794 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-8sr92"] Nov 28 10:20:39 crc kubenswrapper[4838]: I1128 10:20:39.180104 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" podUID="ed3a711b-2077-4680-9e4a-7e9100ed4bd9" containerName="dnsmasq-dns" containerID="cri-o://ae8b495b45c579b904040c9f3fffcafbbed7dd2400fbcff588c207593b46f56c" gracePeriod=10 Nov 28 10:20:39 crc kubenswrapper[4838]: I1128 10:20:39.574043 4838 generic.go:334] "Generic (PLEG): container finished" podID="ed3a711b-2077-4680-9e4a-7e9100ed4bd9" containerID="ae8b495b45c579b904040c9f3fffcafbbed7dd2400fbcff588c207593b46f56c" exitCode=0 Nov 28 10:20:39 crc kubenswrapper[4838]: I1128 10:20:39.574093 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" event={"ID":"ed3a711b-2077-4680-9e4a-7e9100ed4bd9","Type":"ContainerDied","Data":"ae8b495b45c579b904040c9f3fffcafbbed7dd2400fbcff588c207593b46f56c"} Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.230259 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.383031 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-ovsdbserver-sb\") pod \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.383395 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-ovsdbserver-nb\") pod \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.383583 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-config\") pod \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.383936 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7wxsw\" (UniqueName: \"kubernetes.io/projected/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-kube-api-access-7wxsw\") pod \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.384214 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-openstack-edpm-ipam\") pod \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.384371 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-dns-svc\") pod \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\" (UID: \"ed3a711b-2077-4680-9e4a-7e9100ed4bd9\") " Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.390204 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-kube-api-access-7wxsw" (OuterVolumeSpecName: "kube-api-access-7wxsw") pod "ed3a711b-2077-4680-9e4a-7e9100ed4bd9" (UID: "ed3a711b-2077-4680-9e4a-7e9100ed4bd9"). InnerVolumeSpecName "kube-api-access-7wxsw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.440461 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ed3a711b-2077-4680-9e4a-7e9100ed4bd9" (UID: "ed3a711b-2077-4680-9e4a-7e9100ed4bd9"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.452804 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ed3a711b-2077-4680-9e4a-7e9100ed4bd9" (UID: "ed3a711b-2077-4680-9e4a-7e9100ed4bd9"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.455504 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-config" (OuterVolumeSpecName: "config") pod "ed3a711b-2077-4680-9e4a-7e9100ed4bd9" (UID: "ed3a711b-2077-4680-9e4a-7e9100ed4bd9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.471685 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ed3a711b-2077-4680-9e4a-7e9100ed4bd9" (UID: "ed3a711b-2077-4680-9e4a-7e9100ed4bd9"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.483742 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "ed3a711b-2077-4680-9e4a-7e9100ed4bd9" (UID: "ed3a711b-2077-4680-9e4a-7e9100ed4bd9"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.487281 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7wxsw\" (UniqueName: \"kubernetes.io/projected/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-kube-api-access-7wxsw\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.487318 4838 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.487330 4838 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.487342 4838 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.487350 4838 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.487358 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed3a711b-2077-4680-9e4a-7e9100ed4bd9-config\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.587983 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.588413 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6447ccbd8f-8sr92" event={"ID":"ed3a711b-2077-4680-9e4a-7e9100ed4bd9","Type":"ContainerDied","Data":"61b9dedf1765d989ef8c58d3d6c01d55a3f5b6fec5d3e5a2a8721512c2780d80"} Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.588497 4838 scope.go:117] "RemoveContainer" containerID="ae8b495b45c579b904040c9f3fffcafbbed7dd2400fbcff588c207593b46f56c" Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.629145 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-8sr92"] Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.636438 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6447ccbd8f-8sr92"] Nov 28 10:20:40 crc kubenswrapper[4838]: I1128 10:20:40.643848 4838 scope.go:117] "RemoveContainer" containerID="658bc2e96f46f9c978fedbabf11e046a68e79be2c74c5432eac7657449128e4f" Nov 28 10:20:42 crc kubenswrapper[4838]: I1128 10:20:42.581893 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed3a711b-2077-4680-9e4a-7e9100ed4bd9" path="/var/lib/kubelet/pods/ed3a711b-2077-4680-9e4a-7e9100ed4bd9/volumes" Nov 28 10:20:46 crc kubenswrapper[4838]: I1128 10:20:46.641955 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rnlcp" Nov 28 10:20:46 crc kubenswrapper[4838]: I1128 10:20:46.732607 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rnlcp" Nov 28 10:20:46 crc kubenswrapper[4838]: I1128 10:20:46.897219 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rnlcp"] Nov 28 10:20:47 crc kubenswrapper[4838]: I1128 10:20:47.671026 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rnlcp" podUID="0b3677e0-92bc-462b-a9b2-bb0b199bd116" containerName="registry-server" containerID="cri-o://eb6a6b8e9af301377c09571c6367c5a730d10a8ba67996f9feb8cc7146a47cd6" gracePeriod=2 Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.263491 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rnlcp" Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.464055 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b3677e0-92bc-462b-a9b2-bb0b199bd116-utilities\") pod \"0b3677e0-92bc-462b-a9b2-bb0b199bd116\" (UID: \"0b3677e0-92bc-462b-a9b2-bb0b199bd116\") " Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.464519 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2fv48\" (UniqueName: \"kubernetes.io/projected/0b3677e0-92bc-462b-a9b2-bb0b199bd116-kube-api-access-2fv48\") pod \"0b3677e0-92bc-462b-a9b2-bb0b199bd116\" (UID: \"0b3677e0-92bc-462b-a9b2-bb0b199bd116\") " Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.464578 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b3677e0-92bc-462b-a9b2-bb0b199bd116-catalog-content\") pod \"0b3677e0-92bc-462b-a9b2-bb0b199bd116\" (UID: \"0b3677e0-92bc-462b-a9b2-bb0b199bd116\") " Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.466332 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b3677e0-92bc-462b-a9b2-bb0b199bd116-utilities" (OuterVolumeSpecName: "utilities") pod "0b3677e0-92bc-462b-a9b2-bb0b199bd116" (UID: "0b3677e0-92bc-462b-a9b2-bb0b199bd116"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.478888 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b3677e0-92bc-462b-a9b2-bb0b199bd116-kube-api-access-2fv48" (OuterVolumeSpecName: "kube-api-access-2fv48") pod "0b3677e0-92bc-462b-a9b2-bb0b199bd116" (UID: "0b3677e0-92bc-462b-a9b2-bb0b199bd116"). InnerVolumeSpecName "kube-api-access-2fv48". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.567864 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b3677e0-92bc-462b-a9b2-bb0b199bd116-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.567915 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2fv48\" (UniqueName: \"kubernetes.io/projected/0b3677e0-92bc-462b-a9b2-bb0b199bd116-kube-api-access-2fv48\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.648674 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b3677e0-92bc-462b-a9b2-bb0b199bd116-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0b3677e0-92bc-462b-a9b2-bb0b199bd116" (UID: "0b3677e0-92bc-462b-a9b2-bb0b199bd116"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.670647 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b3677e0-92bc-462b-a9b2-bb0b199bd116-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.686915 4838 generic.go:334] "Generic (PLEG): container finished" podID="0b3677e0-92bc-462b-a9b2-bb0b199bd116" containerID="eb6a6b8e9af301377c09571c6367c5a730d10a8ba67996f9feb8cc7146a47cd6" exitCode=0 Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.686968 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rnlcp" event={"ID":"0b3677e0-92bc-462b-a9b2-bb0b199bd116","Type":"ContainerDied","Data":"eb6a6b8e9af301377c09571c6367c5a730d10a8ba67996f9feb8cc7146a47cd6"} Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.687000 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rnlcp" event={"ID":"0b3677e0-92bc-462b-a9b2-bb0b199bd116","Type":"ContainerDied","Data":"9c3273fa066ac8e3e58d928dea3266697f177e758b90d9d35e64d327e3bed5a0"} Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.687024 4838 scope.go:117] "RemoveContainer" containerID="eb6a6b8e9af301377c09571c6367c5a730d10a8ba67996f9feb8cc7146a47cd6" Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.687217 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rnlcp" Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.742650 4838 scope.go:117] "RemoveContainer" containerID="ddd1c515aad6669610038a3bbc75b35d5e3c650c560330feb0b65b5b015b3555" Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.758263 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rnlcp"] Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.771249 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rnlcp"] Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.786943 4838 scope.go:117] "RemoveContainer" containerID="ba9016084e12d24b89946c050f6dd42e636f099c657b6e2d070901544d996173" Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.835399 4838 scope.go:117] "RemoveContainer" containerID="eb6a6b8e9af301377c09571c6367c5a730d10a8ba67996f9feb8cc7146a47cd6" Nov 28 10:20:48 crc kubenswrapper[4838]: E1128 10:20:48.836080 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb6a6b8e9af301377c09571c6367c5a730d10a8ba67996f9feb8cc7146a47cd6\": container with ID starting with eb6a6b8e9af301377c09571c6367c5a730d10a8ba67996f9feb8cc7146a47cd6 not found: ID does not exist" containerID="eb6a6b8e9af301377c09571c6367c5a730d10a8ba67996f9feb8cc7146a47cd6" Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.836129 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb6a6b8e9af301377c09571c6367c5a730d10a8ba67996f9feb8cc7146a47cd6"} err="failed to get container status \"eb6a6b8e9af301377c09571c6367c5a730d10a8ba67996f9feb8cc7146a47cd6\": rpc error: code = NotFound desc = could not find container \"eb6a6b8e9af301377c09571c6367c5a730d10a8ba67996f9feb8cc7146a47cd6\": container with ID starting with eb6a6b8e9af301377c09571c6367c5a730d10a8ba67996f9feb8cc7146a47cd6 not found: ID does not exist" Nov 28 10:20:48 crc 
kubenswrapper[4838]: I1128 10:20:48.836161 4838 scope.go:117] "RemoveContainer" containerID="ddd1c515aad6669610038a3bbc75b35d5e3c650c560330feb0b65b5b015b3555" Nov 28 10:20:48 crc kubenswrapper[4838]: E1128 10:20:48.836937 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ddd1c515aad6669610038a3bbc75b35d5e3c650c560330feb0b65b5b015b3555\": container with ID starting with ddd1c515aad6669610038a3bbc75b35d5e3c650c560330feb0b65b5b015b3555 not found: ID does not exist" containerID="ddd1c515aad6669610038a3bbc75b35d5e3c650c560330feb0b65b5b015b3555" Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.836993 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ddd1c515aad6669610038a3bbc75b35d5e3c650c560330feb0b65b5b015b3555"} err="failed to get container status \"ddd1c515aad6669610038a3bbc75b35d5e3c650c560330feb0b65b5b015b3555\": rpc error: code = NotFound desc = could not find container \"ddd1c515aad6669610038a3bbc75b35d5e3c650c560330feb0b65b5b015b3555\": container with ID starting with ddd1c515aad6669610038a3bbc75b35d5e3c650c560330feb0b65b5b015b3555 not found: ID does not exist" Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.837032 4838 scope.go:117] "RemoveContainer" containerID="ba9016084e12d24b89946c050f6dd42e636f099c657b6e2d070901544d996173" Nov 28 10:20:48 crc kubenswrapper[4838]: E1128 10:20:48.837371 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba9016084e12d24b89946c050f6dd42e636f099c657b6e2d070901544d996173\": container with ID starting with ba9016084e12d24b89946c050f6dd42e636f099c657b6e2d070901544d996173 not found: ID does not exist" containerID="ba9016084e12d24b89946c050f6dd42e636f099c657b6e2d070901544d996173" Nov 28 10:20:48 crc kubenswrapper[4838]: I1128 10:20:48.837406 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba9016084e12d24b89946c050f6dd42e636f099c657b6e2d070901544d996173"} err="failed to get container status \"ba9016084e12d24b89946c050f6dd42e636f099c657b6e2d070901544d996173\": rpc error: code = NotFound desc = could not find container \"ba9016084e12d24b89946c050f6dd42e636f099c657b6e2d070901544d996173\": container with ID starting with ba9016084e12d24b89946c050f6dd42e636f099c657b6e2d070901544d996173 not found: ID does not exist" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.542850 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp"] Nov 28 10:20:49 crc kubenswrapper[4838]: E1128 10:20:49.543773 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b3677e0-92bc-462b-a9b2-bb0b199bd116" containerName="registry-server" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.543799 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b3677e0-92bc-462b-a9b2-bb0b199bd116" containerName="registry-server" Nov 28 10:20:49 crc kubenswrapper[4838]: E1128 10:20:49.543817 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d861e633-58c3-4190-b4bf-c113fe415368" containerName="init" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.543825 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="d861e633-58c3-4190-b4bf-c113fe415368" containerName="init" Nov 28 10:20:49 crc kubenswrapper[4838]: E1128 10:20:49.543857 4838 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="ed3a711b-2077-4680-9e4a-7e9100ed4bd9" containerName="init" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.543864 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed3a711b-2077-4680-9e4a-7e9100ed4bd9" containerName="init" Nov 28 10:20:49 crc kubenswrapper[4838]: E1128 10:20:49.543880 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d861e633-58c3-4190-b4bf-c113fe415368" containerName="dnsmasq-dns" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.543888 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="d861e633-58c3-4190-b4bf-c113fe415368" containerName="dnsmasq-dns" Nov 28 10:20:49 crc kubenswrapper[4838]: E1128 10:20:49.543901 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed3a711b-2077-4680-9e4a-7e9100ed4bd9" containerName="dnsmasq-dns" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.543910 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed3a711b-2077-4680-9e4a-7e9100ed4bd9" containerName="dnsmasq-dns" Nov 28 10:20:49 crc kubenswrapper[4838]: E1128 10:20:49.543922 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b3677e0-92bc-462b-a9b2-bb0b199bd116" containerName="extract-utilities" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.543930 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b3677e0-92bc-462b-a9b2-bb0b199bd116" containerName="extract-utilities" Nov 28 10:20:49 crc kubenswrapper[4838]: E1128 10:20:49.543944 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b3677e0-92bc-462b-a9b2-bb0b199bd116" containerName="extract-content" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.543953 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b3677e0-92bc-462b-a9b2-bb0b199bd116" containerName="extract-content" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.544156 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed3a711b-2077-4680-9e4a-7e9100ed4bd9" containerName="dnsmasq-dns" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.544172 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="d861e633-58c3-4190-b4bf-c113fe415368" containerName="dnsmasq-dns" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.544200 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b3677e0-92bc-462b-a9b2-bb0b199bd116" containerName="registry-server" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.544988 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.550532 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.550883 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.551091 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.551280 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.558017 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp"] Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.593042 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp\" (UID: \"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.593154 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp\" (UID: \"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.593269 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9tnht\" (UniqueName: \"kubernetes.io/projected/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-kube-api-access-9tnht\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp\" (UID: \"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.593367 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp\" (UID: \"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.694467 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp\" (UID: \"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.694559 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-inventory\") pod 
\"repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp\" (UID: \"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.694648 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9tnht\" (UniqueName: \"kubernetes.io/projected/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-kube-api-access-9tnht\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp\" (UID: \"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.694769 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp\" (UID: \"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.699074 4838 generic.go:334] "Generic (PLEG): container finished" podID="969e66ab-e24e-4a63-9543-8214980ccbe3" containerID="0cf28d03b4fa35c112fe6f96d4e4f78d618d4f146ee2e52d121bcfae34610cd9" exitCode=0 Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.699194 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"969e66ab-e24e-4a63-9543-8214980ccbe3","Type":"ContainerDied","Data":"0cf28d03b4fa35c112fe6f96d4e4f78d618d4f146ee2e52d121bcfae34610cd9"} Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.704990 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp\" (UID: \"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.705007 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp\" (UID: \"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.707831 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp\" (UID: \"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.732872 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9tnht\" (UniqueName: \"kubernetes.io/projected/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-kube-api-access-9tnht\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp\" (UID: \"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp" Nov 28 10:20:49 crc kubenswrapper[4838]: I1128 10:20:49.871866 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp" Nov 28 10:20:50 crc kubenswrapper[4838]: I1128 10:20:50.476257 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp"] Nov 28 10:20:50 crc kubenswrapper[4838]: W1128 10:20:50.481859 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0dc66645_e2fb_4fa3_ae22_be67c8bf6eb5.slice/crio-ffeec12deb6b1225796a17652fe5d47be4de25695866fee5f2a954736f095fea WatchSource:0}: Error finding container ffeec12deb6b1225796a17652fe5d47be4de25695866fee5f2a954736f095fea: Status 404 returned error can't find the container with id ffeec12deb6b1225796a17652fe5d47be4de25695866fee5f2a954736f095fea Nov 28 10:20:50 crc kubenswrapper[4838]: I1128 10:20:50.571534 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b3677e0-92bc-462b-a9b2-bb0b199bd116" path="/var/lib/kubelet/pods/0b3677e0-92bc-462b-a9b2-bb0b199bd116/volumes" Nov 28 10:20:50 crc kubenswrapper[4838]: I1128 10:20:50.723823 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"969e66ab-e24e-4a63-9543-8214980ccbe3","Type":"ContainerStarted","Data":"b1fea4b4021bfa22bb018b36f221588bbe5ce19abd8e5f8235d234de6fecc43e"} Nov 28 10:20:50 crc kubenswrapper[4838]: I1128 10:20:50.726597 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp" event={"ID":"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5","Type":"ContainerStarted","Data":"ffeec12deb6b1225796a17652fe5d47be4de25695866fee5f2a954736f095fea"} Nov 28 10:20:50 crc kubenswrapper[4838]: I1128 10:20:50.726994 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 28 10:20:50 crc kubenswrapper[4838]: I1128 10:20:50.764918 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=36.764902126 podStartE2EDuration="36.764902126s" podCreationTimestamp="2025-11-28 10:20:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:20:50.757315995 +0000 UTC m=+1422.456290165" watchObservedRunningTime="2025-11-28 10:20:50.764902126 +0000 UTC m=+1422.463876296" Nov 28 10:20:51 crc kubenswrapper[4838]: I1128 10:20:51.747414 4838 generic.go:334] "Generic (PLEG): container finished" podID="091769fb-bf67-454a-b0da-3e33589799f9" containerID="2872d73efb485ffc8d941ee6987506dbdb8ad26811667d69461a20762ee7ca2a" exitCode=0 Nov 28 10:20:51 crc kubenswrapper[4838]: I1128 10:20:51.747538 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"091769fb-bf67-454a-b0da-3e33589799f9","Type":"ContainerDied","Data":"2872d73efb485ffc8d941ee6987506dbdb8ad26811667d69461a20762ee7ca2a"} Nov 28 10:20:52 crc kubenswrapper[4838]: I1128 10:20:52.758856 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"091769fb-bf67-454a-b0da-3e33589799f9","Type":"ContainerStarted","Data":"efe52f176c2fee92686b098e51bbb4875f8a5a18b85d2f9ef0892f77412ec9fc"} Nov 28 10:20:52 crc kubenswrapper[4838]: I1128 10:20:52.759709 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:20:52 crc kubenswrapper[4838]: I1128 10:20:52.793302 
4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.79328736 podStartE2EDuration="37.79328736s" podCreationTimestamp="2025-11-28 10:20:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:20:52.789675989 +0000 UTC m=+1424.488650159" watchObservedRunningTime="2025-11-28 10:20:52.79328736 +0000 UTC m=+1424.492261530" Nov 28 10:21:01 crc kubenswrapper[4838]: I1128 10:21:01.847994 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp" event={"ID":"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5","Type":"ContainerStarted","Data":"38de45b3d49d118d7707573f1ce8256730514f7c25ff22bd51c5b87eb6e1f73e"} Nov 28 10:21:01 crc kubenswrapper[4838]: I1128 10:21:01.884989 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp" podStartSLOduration=2.611031193 podStartE2EDuration="12.884964013s" podCreationTimestamp="2025-11-28 10:20:49 +0000 UTC" firstStartedPulling="2025-11-28 10:20:50.484401528 +0000 UTC m=+1422.183375698" lastFinishedPulling="2025-11-28 10:21:00.758334318 +0000 UTC m=+1432.457308518" observedRunningTime="2025-11-28 10:21:01.869305892 +0000 UTC m=+1433.568280102" watchObservedRunningTime="2025-11-28 10:21:01.884964013 +0000 UTC m=+1433.583938213" Nov 28 10:21:04 crc kubenswrapper[4838]: I1128 10:21:04.751885 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 28 10:21:05 crc kubenswrapper[4838]: I1128 10:21:05.674020 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 28 10:21:12 crc kubenswrapper[4838]: E1128 10:21:12.361583 4838 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0dc66645_e2fb_4fa3_ae22_be67c8bf6eb5.slice/crio-38de45b3d49d118d7707573f1ce8256730514f7c25ff22bd51c5b87eb6e1f73e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0dc66645_e2fb_4fa3_ae22_be67c8bf6eb5.slice/crio-conmon-38de45b3d49d118d7707573f1ce8256730514f7c25ff22bd51c5b87eb6e1f73e.scope\": RecentStats: unable to find data in memory cache]" Nov 28 10:21:12 crc kubenswrapper[4838]: I1128 10:21:12.969979 4838 generic.go:334] "Generic (PLEG): container finished" podID="0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5" containerID="38de45b3d49d118d7707573f1ce8256730514f7c25ff22bd51c5b87eb6e1f73e" exitCode=0 Nov 28 10:21:12 crc kubenswrapper[4838]: I1128 10:21:12.970040 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp" event={"ID":"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5","Type":"ContainerDied","Data":"38de45b3d49d118d7707573f1ce8256730514f7c25ff22bd51c5b87eb6e1f73e"} Nov 28 10:21:14 crc kubenswrapper[4838]: I1128 10:21:14.457332 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp" Nov 28 10:21:14 crc kubenswrapper[4838]: I1128 10:21:14.513736 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9tnht\" (UniqueName: \"kubernetes.io/projected/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-kube-api-access-9tnht\") pod \"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5\" (UID: \"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5\") " Nov 28 10:21:14 crc kubenswrapper[4838]: I1128 10:21:14.513805 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-inventory\") pod \"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5\" (UID: \"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5\") " Nov 28 10:21:14 crc kubenswrapper[4838]: I1128 10:21:14.513822 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-ssh-key\") pod \"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5\" (UID: \"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5\") " Nov 28 10:21:14 crc kubenswrapper[4838]: I1128 10:21:14.513873 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-repo-setup-combined-ca-bundle\") pod \"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5\" (UID: \"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5\") " Nov 28 10:21:14 crc kubenswrapper[4838]: I1128 10:21:14.518853 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-kube-api-access-9tnht" (OuterVolumeSpecName: "kube-api-access-9tnht") pod "0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5" (UID: "0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5"). InnerVolumeSpecName "kube-api-access-9tnht". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:21:14 crc kubenswrapper[4838]: I1128 10:21:14.520439 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5" (UID: "0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:21:14 crc kubenswrapper[4838]: I1128 10:21:14.542996 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-inventory" (OuterVolumeSpecName: "inventory") pod "0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5" (UID: "0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:21:14 crc kubenswrapper[4838]: I1128 10:21:14.565610 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5" (UID: "0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:21:14 crc kubenswrapper[4838]: I1128 10:21:14.616473 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 10:21:14 crc kubenswrapper[4838]: I1128 10:21:14.616508 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:21:14 crc kubenswrapper[4838]: I1128 10:21:14.616521 4838 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:21:14 crc kubenswrapper[4838]: I1128 10:21:14.616536 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9tnht\" (UniqueName: \"kubernetes.io/projected/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5-kube-api-access-9tnht\") on node \"crc\" DevicePath \"\"" Nov 28 10:21:14 crc kubenswrapper[4838]: I1128 10:21:14.993151 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp" event={"ID":"0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5","Type":"ContainerDied","Data":"ffeec12deb6b1225796a17652fe5d47be4de25695866fee5f2a954736f095fea"} Nov 28 10:21:14 crc kubenswrapper[4838]: I1128 10:21:14.993544 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ffeec12deb6b1225796a17652fe5d47be4de25695866fee5f2a954736f095fea" Nov 28 10:21:14 crc kubenswrapper[4838]: I1128 10:21:14.993201 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp" Nov 28 10:21:15 crc kubenswrapper[4838]: I1128 10:21:15.078385 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg"] Nov 28 10:21:15 crc kubenswrapper[4838]: E1128 10:21:15.078774 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 28 10:21:15 crc kubenswrapper[4838]: I1128 10:21:15.078795 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 28 10:21:15 crc kubenswrapper[4838]: I1128 10:21:15.078965 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 28 10:21:15 crc kubenswrapper[4838]: I1128 10:21:15.079528 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg" Nov 28 10:21:15 crc kubenswrapper[4838]: I1128 10:21:15.084760 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 10:21:15 crc kubenswrapper[4838]: I1128 10:21:15.085156 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 10:21:15 crc kubenswrapper[4838]: I1128 10:21:15.085532 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 10:21:15 crc kubenswrapper[4838]: I1128 10:21:15.086498 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn" Nov 28 10:21:15 crc kubenswrapper[4838]: I1128 10:21:15.091266 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg"] Nov 28 10:21:15 crc kubenswrapper[4838]: I1128 10:21:15.228137 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg\" (UID: \"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg" Nov 28 10:21:15 crc kubenswrapper[4838]: I1128 10:21:15.228621 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg\" (UID: \"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg" Nov 28 10:21:15 crc kubenswrapper[4838]: I1128 10:21:15.228765 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xw6g5\" (UniqueName: \"kubernetes.io/projected/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-kube-api-access-xw6g5\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg\" (UID: \"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg" Nov 28 10:21:15 crc kubenswrapper[4838]: I1128 10:21:15.228913 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg\" (UID: \"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg" Nov 28 10:21:15 crc kubenswrapper[4838]: I1128 10:21:15.331283 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg\" (UID: \"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg" Nov 28 10:21:15 crc kubenswrapper[4838]: I1128 10:21:15.331354 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xw6g5\" (UniqueName: \"kubernetes.io/projected/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-kube-api-access-xw6g5\") 
pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg\" (UID: \"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg" Nov 28 10:21:15 crc kubenswrapper[4838]: I1128 10:21:15.331414 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg\" (UID: \"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg" Nov 28 10:21:15 crc kubenswrapper[4838]: I1128 10:21:15.331470 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg\" (UID: \"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg" Nov 28 10:21:15 crc kubenswrapper[4838]: I1128 10:21:15.349489 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg\" (UID: \"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg" Nov 28 10:21:15 crc kubenswrapper[4838]: I1128 10:21:15.351238 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg\" (UID: \"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg" Nov 28 10:21:15 crc kubenswrapper[4838]: I1128 10:21:15.353090 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg\" (UID: \"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg" Nov 28 10:21:15 crc kubenswrapper[4838]: I1128 10:21:15.353592 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xw6g5\" (UniqueName: \"kubernetes.io/projected/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-kube-api-access-xw6g5\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg\" (UID: \"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg" Nov 28 10:21:15 crc kubenswrapper[4838]: I1128 10:21:15.398135 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg" Nov 28 10:21:16 crc kubenswrapper[4838]: I1128 10:21:16.017597 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg"] Nov 28 10:21:17 crc kubenswrapper[4838]: I1128 10:21:17.015055 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg" event={"ID":"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0","Type":"ContainerStarted","Data":"d17fdebf7ddf7bf9d073a969b6fa6c85acd4d54a509d6cb4c67f1dad2fec6a0a"} Nov 28 10:21:17 crc kubenswrapper[4838]: I1128 10:21:17.015669 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg" event={"ID":"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0","Type":"ContainerStarted","Data":"6f055b39bba8d82c73b609389d00348b30a9b5ee369bf5d10c14af8f1bd67aca"} Nov 28 10:21:17 crc kubenswrapper[4838]: I1128 10:21:17.038563 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg" podStartSLOduration=1.4440845119999999 podStartE2EDuration="2.038540177s" podCreationTimestamp="2025-11-28 10:21:15 +0000 UTC" firstStartedPulling="2025-11-28 10:21:16.033727141 +0000 UTC m=+1447.732701311" lastFinishedPulling="2025-11-28 10:21:16.628182776 +0000 UTC m=+1448.327156976" observedRunningTime="2025-11-28 10:21:17.03602717 +0000 UTC m=+1448.735001340" watchObservedRunningTime="2025-11-28 10:21:17.038540177 +0000 UTC m=+1448.737514377" Nov 28 10:21:23 crc kubenswrapper[4838]: I1128 10:21:23.940389 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:21:23 crc kubenswrapper[4838]: I1128 10:21:23.940873 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:21:53 crc kubenswrapper[4838]: I1128 10:21:53.940329 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:21:53 crc kubenswrapper[4838]: I1128 10:21:53.940875 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:22:14 crc kubenswrapper[4838]: I1128 10:22:14.833257 4838 scope.go:117] "RemoveContainer" containerID="69d421e40045fc218969791f368c7f037bec542d7f7d42c32a098a332a595239" Nov 28 10:22:14 crc kubenswrapper[4838]: I1128 10:22:14.877521 4838 scope.go:117] "RemoveContainer" containerID="e039323569056de17135c4b6cfcccdad13e28b3a15678ee89c93c65fb83260cb" Nov 28 10:22:14 crc kubenswrapper[4838]: I1128 10:22:14.922401 4838 scope.go:117] 
"RemoveContainer" containerID="70561ffe7582eb001bea6579c416ab59dfacdd89d833a820e8066ba416af0b06" Nov 28 10:22:23 crc kubenswrapper[4838]: I1128 10:22:23.940758 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:22:23 crc kubenswrapper[4838]: I1128 10:22:23.942201 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:22:23 crc kubenswrapper[4838]: I1128 10:22:23.942308 4838 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 10:22:23 crc kubenswrapper[4838]: I1128 10:22:23.943473 4838 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"37eb1fd382bf7ac855fc0bf19ecaa14a9b60925b9775992a84755c27a44467c5"} pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 10:22:23 crc kubenswrapper[4838]: I1128 10:22:23.943586 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" containerID="cri-o://37eb1fd382bf7ac855fc0bf19ecaa14a9b60925b9775992a84755c27a44467c5" gracePeriod=600 Nov 28 10:22:24 crc kubenswrapper[4838]: I1128 10:22:24.813118 4838 generic.go:334] "Generic (PLEG): container finished" podID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerID="37eb1fd382bf7ac855fc0bf19ecaa14a9b60925b9775992a84755c27a44467c5" exitCode=0 Nov 28 10:22:24 crc kubenswrapper[4838]: I1128 10:22:24.813212 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerDied","Data":"37eb1fd382bf7ac855fc0bf19ecaa14a9b60925b9775992a84755c27a44467c5"} Nov 28 10:22:24 crc kubenswrapper[4838]: I1128 10:22:24.813653 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerStarted","Data":"94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab"} Nov 28 10:22:24 crc kubenswrapper[4838]: I1128 10:22:24.813680 4838 scope.go:117] "RemoveContainer" containerID="1815c6b644c08c4a75da2a50900db223999a631363ba83a16dba3176b263bb61" Nov 28 10:23:15 crc kubenswrapper[4838]: I1128 10:23:15.030213 4838 scope.go:117] "RemoveContainer" containerID="7d3e99aa3c380850369bca076ddc18dc078bc00cc5df9176c58055ecb3805926" Nov 28 10:23:15 crc kubenswrapper[4838]: I1128 10:23:15.094160 4838 scope.go:117] "RemoveContainer" containerID="f50eef4ef7b6d2f9da4b3f4a982f5900bae1db40cf0ac32340b73745f9f95797" Nov 28 10:23:23 crc kubenswrapper[4838]: I1128 10:23:23.290217 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-shpt2"] Nov 28 10:23:23 crc kubenswrapper[4838]: 
I1128 10:23:23.294014 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-shpt2" Nov 28 10:23:23 crc kubenswrapper[4838]: I1128 10:23:23.302754 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-shpt2"] Nov 28 10:23:23 crc kubenswrapper[4838]: I1128 10:23:23.378187 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bbba9d22-595e-4652-9ad1-3a39be7ddd84-utilities\") pod \"redhat-marketplace-shpt2\" (UID: \"bbba9d22-595e-4652-9ad1-3a39be7ddd84\") " pod="openshift-marketplace/redhat-marketplace-shpt2" Nov 28 10:23:23 crc kubenswrapper[4838]: I1128 10:23:23.378747 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bbba9d22-595e-4652-9ad1-3a39be7ddd84-catalog-content\") pod \"redhat-marketplace-shpt2\" (UID: \"bbba9d22-595e-4652-9ad1-3a39be7ddd84\") " pod="openshift-marketplace/redhat-marketplace-shpt2" Nov 28 10:23:23 crc kubenswrapper[4838]: I1128 10:23:23.378908 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wntgg\" (UniqueName: \"kubernetes.io/projected/bbba9d22-595e-4652-9ad1-3a39be7ddd84-kube-api-access-wntgg\") pod \"redhat-marketplace-shpt2\" (UID: \"bbba9d22-595e-4652-9ad1-3a39be7ddd84\") " pod="openshift-marketplace/redhat-marketplace-shpt2" Nov 28 10:23:23 crc kubenswrapper[4838]: I1128 10:23:23.480707 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bbba9d22-595e-4652-9ad1-3a39be7ddd84-catalog-content\") pod \"redhat-marketplace-shpt2\" (UID: \"bbba9d22-595e-4652-9ad1-3a39be7ddd84\") " pod="openshift-marketplace/redhat-marketplace-shpt2" Nov 28 10:23:23 crc kubenswrapper[4838]: I1128 10:23:23.481069 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wntgg\" (UniqueName: \"kubernetes.io/projected/bbba9d22-595e-4652-9ad1-3a39be7ddd84-kube-api-access-wntgg\") pod \"redhat-marketplace-shpt2\" (UID: \"bbba9d22-595e-4652-9ad1-3a39be7ddd84\") " pod="openshift-marketplace/redhat-marketplace-shpt2" Nov 28 10:23:23 crc kubenswrapper[4838]: I1128 10:23:23.481257 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bbba9d22-595e-4652-9ad1-3a39be7ddd84-utilities\") pod \"redhat-marketplace-shpt2\" (UID: \"bbba9d22-595e-4652-9ad1-3a39be7ddd84\") " pod="openshift-marketplace/redhat-marketplace-shpt2" Nov 28 10:23:23 crc kubenswrapper[4838]: I1128 10:23:23.481166 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bbba9d22-595e-4652-9ad1-3a39be7ddd84-catalog-content\") pod \"redhat-marketplace-shpt2\" (UID: \"bbba9d22-595e-4652-9ad1-3a39be7ddd84\") " pod="openshift-marketplace/redhat-marketplace-shpt2" Nov 28 10:23:23 crc kubenswrapper[4838]: I1128 10:23:23.481532 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bbba9d22-595e-4652-9ad1-3a39be7ddd84-utilities\") pod \"redhat-marketplace-shpt2\" (UID: \"bbba9d22-595e-4652-9ad1-3a39be7ddd84\") " pod="openshift-marketplace/redhat-marketplace-shpt2" Nov 28 10:23:23 crc kubenswrapper[4838]: 
I1128 10:23:23.506552 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wntgg\" (UniqueName: \"kubernetes.io/projected/bbba9d22-595e-4652-9ad1-3a39be7ddd84-kube-api-access-wntgg\") pod \"redhat-marketplace-shpt2\" (UID: \"bbba9d22-595e-4652-9ad1-3a39be7ddd84\") " pod="openshift-marketplace/redhat-marketplace-shpt2" Nov 28 10:23:23 crc kubenswrapper[4838]: I1128 10:23:23.619610 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-shpt2" Nov 28 10:23:24 crc kubenswrapper[4838]: I1128 10:23:24.102912 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-shpt2"] Nov 28 10:23:24 crc kubenswrapper[4838]: I1128 10:23:24.497045 4838 generic.go:334] "Generic (PLEG): container finished" podID="bbba9d22-595e-4652-9ad1-3a39be7ddd84" containerID="7aa37d43f711c860038a25494b0bdb34f668f51ba86237c9da3ed3cd4a53bf50" exitCode=0 Nov 28 10:23:24 crc kubenswrapper[4838]: I1128 10:23:24.497116 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-shpt2" event={"ID":"bbba9d22-595e-4652-9ad1-3a39be7ddd84","Type":"ContainerDied","Data":"7aa37d43f711c860038a25494b0bdb34f668f51ba86237c9da3ed3cd4a53bf50"} Nov 28 10:23:24 crc kubenswrapper[4838]: I1128 10:23:24.497465 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-shpt2" event={"ID":"bbba9d22-595e-4652-9ad1-3a39be7ddd84","Type":"ContainerStarted","Data":"641a30d0aa32de1bcbe33116ddab440176a150e88098dcdb9748a50dc74f18d4"} Nov 28 10:23:26 crc kubenswrapper[4838]: E1128 10:23:26.006129 4838 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbbba9d22_595e_4652_9ad1_3a39be7ddd84.slice/crio-conmon-d3e0a099e6c4fe9c1c4d8486e26511163df26268c102af4b6c9e127cced303d1.scope\": RecentStats: unable to find data in memory cache]" Nov 28 10:23:26 crc kubenswrapper[4838]: I1128 10:23:26.519015 4838 generic.go:334] "Generic (PLEG): container finished" podID="bbba9d22-595e-4652-9ad1-3a39be7ddd84" containerID="d3e0a099e6c4fe9c1c4d8486e26511163df26268c102af4b6c9e127cced303d1" exitCode=0 Nov 28 10:23:26 crc kubenswrapper[4838]: I1128 10:23:26.519060 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-shpt2" event={"ID":"bbba9d22-595e-4652-9ad1-3a39be7ddd84","Type":"ContainerDied","Data":"d3e0a099e6c4fe9c1c4d8486e26511163df26268c102af4b6c9e127cced303d1"} Nov 28 10:23:28 crc kubenswrapper[4838]: I1128 10:23:28.554690 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-shpt2" event={"ID":"bbba9d22-595e-4652-9ad1-3a39be7ddd84","Type":"ContainerStarted","Data":"1ec3c6c00b3ede170facb1c82fb6d6a2cefeaea9a7ae2b2385561246f9b4cbfe"} Nov 28 10:23:28 crc kubenswrapper[4838]: I1128 10:23:28.644048 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-shpt2" podStartSLOduration=2.781793045 podStartE2EDuration="5.644024181s" podCreationTimestamp="2025-11-28 10:23:23 +0000 UTC" firstStartedPulling="2025-11-28 10:23:24.50091677 +0000 UTC m=+1576.199890940" lastFinishedPulling="2025-11-28 10:23:27.363147906 +0000 UTC m=+1579.062122076" observedRunningTime="2025-11-28 10:23:28.586571556 +0000 UTC m=+1580.285545786" watchObservedRunningTime="2025-11-28 10:23:28.644024181 +0000 UTC 
m=+1580.342998351" Nov 28 10:23:28 crc kubenswrapper[4838]: I1128 10:23:28.652876 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-lhmbn"] Nov 28 10:23:28 crc kubenswrapper[4838]: I1128 10:23:28.654561 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lhmbn" Nov 28 10:23:28 crc kubenswrapper[4838]: I1128 10:23:28.669336 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lhmbn"] Nov 28 10:23:28 crc kubenswrapper[4838]: I1128 10:23:28.796350 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39ff7a43-9be5-4b9a-a8c1-35273042074e-catalog-content\") pod \"community-operators-lhmbn\" (UID: \"39ff7a43-9be5-4b9a-a8c1-35273042074e\") " pod="openshift-marketplace/community-operators-lhmbn" Nov 28 10:23:28 crc kubenswrapper[4838]: I1128 10:23:28.796423 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39ff7a43-9be5-4b9a-a8c1-35273042074e-utilities\") pod \"community-operators-lhmbn\" (UID: \"39ff7a43-9be5-4b9a-a8c1-35273042074e\") " pod="openshift-marketplace/community-operators-lhmbn" Nov 28 10:23:28 crc kubenswrapper[4838]: I1128 10:23:28.796467 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whmm4\" (UniqueName: \"kubernetes.io/projected/39ff7a43-9be5-4b9a-a8c1-35273042074e-kube-api-access-whmm4\") pod \"community-operators-lhmbn\" (UID: \"39ff7a43-9be5-4b9a-a8c1-35273042074e\") " pod="openshift-marketplace/community-operators-lhmbn" Nov 28 10:23:28 crc kubenswrapper[4838]: I1128 10:23:28.898530 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39ff7a43-9be5-4b9a-a8c1-35273042074e-utilities\") pod \"community-operators-lhmbn\" (UID: \"39ff7a43-9be5-4b9a-a8c1-35273042074e\") " pod="openshift-marketplace/community-operators-lhmbn" Nov 28 10:23:28 crc kubenswrapper[4838]: I1128 10:23:28.898673 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whmm4\" (UniqueName: \"kubernetes.io/projected/39ff7a43-9be5-4b9a-a8c1-35273042074e-kube-api-access-whmm4\") pod \"community-operators-lhmbn\" (UID: \"39ff7a43-9be5-4b9a-a8c1-35273042074e\") " pod="openshift-marketplace/community-operators-lhmbn" Nov 28 10:23:28 crc kubenswrapper[4838]: I1128 10:23:28.898889 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39ff7a43-9be5-4b9a-a8c1-35273042074e-catalog-content\") pod \"community-operators-lhmbn\" (UID: \"39ff7a43-9be5-4b9a-a8c1-35273042074e\") " pod="openshift-marketplace/community-operators-lhmbn" Nov 28 10:23:28 crc kubenswrapper[4838]: I1128 10:23:28.899340 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39ff7a43-9be5-4b9a-a8c1-35273042074e-utilities\") pod \"community-operators-lhmbn\" (UID: \"39ff7a43-9be5-4b9a-a8c1-35273042074e\") " pod="openshift-marketplace/community-operators-lhmbn" Nov 28 10:23:28 crc kubenswrapper[4838]: I1128 10:23:28.899563 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/39ff7a43-9be5-4b9a-a8c1-35273042074e-catalog-content\") pod \"community-operators-lhmbn\" (UID: \"39ff7a43-9be5-4b9a-a8c1-35273042074e\") " pod="openshift-marketplace/community-operators-lhmbn" Nov 28 10:23:28 crc kubenswrapper[4838]: I1128 10:23:28.922785 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whmm4\" (UniqueName: \"kubernetes.io/projected/39ff7a43-9be5-4b9a-a8c1-35273042074e-kube-api-access-whmm4\") pod \"community-operators-lhmbn\" (UID: \"39ff7a43-9be5-4b9a-a8c1-35273042074e\") " pod="openshift-marketplace/community-operators-lhmbn" Nov 28 10:23:28 crc kubenswrapper[4838]: I1128 10:23:28.971942 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lhmbn" Nov 28 10:23:29 crc kubenswrapper[4838]: I1128 10:23:29.488649 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lhmbn"] Nov 28 10:23:29 crc kubenswrapper[4838]: I1128 10:23:29.575223 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lhmbn" event={"ID":"39ff7a43-9be5-4b9a-a8c1-35273042074e","Type":"ContainerStarted","Data":"fe6e5d7a1644c7dfbc9f924cf77255a3839aaa0378e39700f15c0311709f153d"} Nov 28 10:23:30 crc kubenswrapper[4838]: I1128 10:23:30.590948 4838 generic.go:334] "Generic (PLEG): container finished" podID="39ff7a43-9be5-4b9a-a8c1-35273042074e" containerID="375af4270e882c04caabc51188297bca24b88be09aa6984e46a42cf3cee09b93" exitCode=0 Nov 28 10:23:30 crc kubenswrapper[4838]: I1128 10:23:30.591174 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lhmbn" event={"ID":"39ff7a43-9be5-4b9a-a8c1-35273042074e","Type":"ContainerDied","Data":"375af4270e882c04caabc51188297bca24b88be09aa6984e46a42cf3cee09b93"} Nov 28 10:23:32 crc kubenswrapper[4838]: I1128 10:23:32.619406 4838 generic.go:334] "Generic (PLEG): container finished" podID="39ff7a43-9be5-4b9a-a8c1-35273042074e" containerID="482a8b22cd99fc91f9ab736de82a6893a2c9584d4f8dc1658e1ee814dae0dd94" exitCode=0 Nov 28 10:23:32 crc kubenswrapper[4838]: I1128 10:23:32.620132 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lhmbn" event={"ID":"39ff7a43-9be5-4b9a-a8c1-35273042074e","Type":"ContainerDied","Data":"482a8b22cd99fc91f9ab736de82a6893a2c9584d4f8dc1658e1ee814dae0dd94"} Nov 28 10:23:33 crc kubenswrapper[4838]: I1128 10:23:33.620830 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-shpt2" Nov 28 10:23:33 crc kubenswrapper[4838]: I1128 10:23:33.621570 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-shpt2" Nov 28 10:23:33 crc kubenswrapper[4838]: I1128 10:23:33.668945 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-shpt2" Nov 28 10:23:34 crc kubenswrapper[4838]: I1128 10:23:34.642359 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lhmbn" event={"ID":"39ff7a43-9be5-4b9a-a8c1-35273042074e","Type":"ContainerStarted","Data":"05de02b8d1ab7d56d92ae65bf4e94e3197ae15ab3806c8ed170ec488915352df"} Nov 28 10:23:34 crc kubenswrapper[4838]: I1128 10:23:34.680455 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-lhmbn" 
podStartSLOduration=4.075884594 podStartE2EDuration="6.680432186s" podCreationTimestamp="2025-11-28 10:23:28 +0000 UTC" firstStartedPulling="2025-11-28 10:23:30.593002904 +0000 UTC m=+1582.291977074" lastFinishedPulling="2025-11-28 10:23:33.197550486 +0000 UTC m=+1584.896524666" observedRunningTime="2025-11-28 10:23:34.667666772 +0000 UTC m=+1586.366640952" watchObservedRunningTime="2025-11-28 10:23:34.680432186 +0000 UTC m=+1586.379406356" Nov 28 10:23:34 crc kubenswrapper[4838]: I1128 10:23:34.696022 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-shpt2" Nov 28 10:23:35 crc kubenswrapper[4838]: I1128 10:23:35.836344 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-shpt2"] Nov 28 10:23:36 crc kubenswrapper[4838]: I1128 10:23:36.664791 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-shpt2" podUID="bbba9d22-595e-4652-9ad1-3a39be7ddd84" containerName="registry-server" containerID="cri-o://1ec3c6c00b3ede170facb1c82fb6d6a2cefeaea9a7ae2b2385561246f9b4cbfe" gracePeriod=2 Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.279918 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-shpt2" Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.382708 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bbba9d22-595e-4652-9ad1-3a39be7ddd84-utilities\") pod \"bbba9d22-595e-4652-9ad1-3a39be7ddd84\" (UID: \"bbba9d22-595e-4652-9ad1-3a39be7ddd84\") " Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.383306 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wntgg\" (UniqueName: \"kubernetes.io/projected/bbba9d22-595e-4652-9ad1-3a39be7ddd84-kube-api-access-wntgg\") pod \"bbba9d22-595e-4652-9ad1-3a39be7ddd84\" (UID: \"bbba9d22-595e-4652-9ad1-3a39be7ddd84\") " Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.383329 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bbba9d22-595e-4652-9ad1-3a39be7ddd84-catalog-content\") pod \"bbba9d22-595e-4652-9ad1-3a39be7ddd84\" (UID: \"bbba9d22-595e-4652-9ad1-3a39be7ddd84\") " Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.383740 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bbba9d22-595e-4652-9ad1-3a39be7ddd84-utilities" (OuterVolumeSpecName: "utilities") pod "bbba9d22-595e-4652-9ad1-3a39be7ddd84" (UID: "bbba9d22-595e-4652-9ad1-3a39be7ddd84"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.392155 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bbba9d22-595e-4652-9ad1-3a39be7ddd84-kube-api-access-wntgg" (OuterVolumeSpecName: "kube-api-access-wntgg") pod "bbba9d22-595e-4652-9ad1-3a39be7ddd84" (UID: "bbba9d22-595e-4652-9ad1-3a39be7ddd84"). InnerVolumeSpecName "kube-api-access-wntgg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.402785 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bbba9d22-595e-4652-9ad1-3a39be7ddd84-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bbba9d22-595e-4652-9ad1-3a39be7ddd84" (UID: "bbba9d22-595e-4652-9ad1-3a39be7ddd84"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.485594 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wntgg\" (UniqueName: \"kubernetes.io/projected/bbba9d22-595e-4652-9ad1-3a39be7ddd84-kube-api-access-wntgg\") on node \"crc\" DevicePath \"\"" Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.485640 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bbba9d22-595e-4652-9ad1-3a39be7ddd84-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.485654 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bbba9d22-595e-4652-9ad1-3a39be7ddd84-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.673935 4838 generic.go:334] "Generic (PLEG): container finished" podID="bbba9d22-595e-4652-9ad1-3a39be7ddd84" containerID="1ec3c6c00b3ede170facb1c82fb6d6a2cefeaea9a7ae2b2385561246f9b4cbfe" exitCode=0 Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.673982 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-shpt2" event={"ID":"bbba9d22-595e-4652-9ad1-3a39be7ddd84","Type":"ContainerDied","Data":"1ec3c6c00b3ede170facb1c82fb6d6a2cefeaea9a7ae2b2385561246f9b4cbfe"} Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.674007 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-shpt2" Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.674026 4838 scope.go:117] "RemoveContainer" containerID="1ec3c6c00b3ede170facb1c82fb6d6a2cefeaea9a7ae2b2385561246f9b4cbfe" Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.674013 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-shpt2" event={"ID":"bbba9d22-595e-4652-9ad1-3a39be7ddd84","Type":"ContainerDied","Data":"641a30d0aa32de1bcbe33116ddab440176a150e88098dcdb9748a50dc74f18d4"} Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.692355 4838 scope.go:117] "RemoveContainer" containerID="d3e0a099e6c4fe9c1c4d8486e26511163df26268c102af4b6c9e127cced303d1" Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.709682 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-shpt2"] Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.719532 4838 scope.go:117] "RemoveContainer" containerID="7aa37d43f711c860038a25494b0bdb34f668f51ba86237c9da3ed3cd4a53bf50" Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.720397 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-shpt2"] Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.764332 4838 scope.go:117] "RemoveContainer" containerID="1ec3c6c00b3ede170facb1c82fb6d6a2cefeaea9a7ae2b2385561246f9b4cbfe" Nov 28 10:23:37 crc kubenswrapper[4838]: E1128 10:23:37.765056 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ec3c6c00b3ede170facb1c82fb6d6a2cefeaea9a7ae2b2385561246f9b4cbfe\": container with ID starting with 1ec3c6c00b3ede170facb1c82fb6d6a2cefeaea9a7ae2b2385561246f9b4cbfe not found: ID does not exist" containerID="1ec3c6c00b3ede170facb1c82fb6d6a2cefeaea9a7ae2b2385561246f9b4cbfe" Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.765112 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ec3c6c00b3ede170facb1c82fb6d6a2cefeaea9a7ae2b2385561246f9b4cbfe"} err="failed to get container status \"1ec3c6c00b3ede170facb1c82fb6d6a2cefeaea9a7ae2b2385561246f9b4cbfe\": rpc error: code = NotFound desc = could not find container \"1ec3c6c00b3ede170facb1c82fb6d6a2cefeaea9a7ae2b2385561246f9b4cbfe\": container with ID starting with 1ec3c6c00b3ede170facb1c82fb6d6a2cefeaea9a7ae2b2385561246f9b4cbfe not found: ID does not exist" Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.765145 4838 scope.go:117] "RemoveContainer" containerID="d3e0a099e6c4fe9c1c4d8486e26511163df26268c102af4b6c9e127cced303d1" Nov 28 10:23:37 crc kubenswrapper[4838]: E1128 10:23:37.765543 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3e0a099e6c4fe9c1c4d8486e26511163df26268c102af4b6c9e127cced303d1\": container with ID starting with d3e0a099e6c4fe9c1c4d8486e26511163df26268c102af4b6c9e127cced303d1 not found: ID does not exist" containerID="d3e0a099e6c4fe9c1c4d8486e26511163df26268c102af4b6c9e127cced303d1" Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.765573 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3e0a099e6c4fe9c1c4d8486e26511163df26268c102af4b6c9e127cced303d1"} err="failed to get container status \"d3e0a099e6c4fe9c1c4d8486e26511163df26268c102af4b6c9e127cced303d1\": rpc error: code = NotFound desc = could not find 
container \"d3e0a099e6c4fe9c1c4d8486e26511163df26268c102af4b6c9e127cced303d1\": container with ID starting with d3e0a099e6c4fe9c1c4d8486e26511163df26268c102af4b6c9e127cced303d1 not found: ID does not exist" Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.765589 4838 scope.go:117] "RemoveContainer" containerID="7aa37d43f711c860038a25494b0bdb34f668f51ba86237c9da3ed3cd4a53bf50" Nov 28 10:23:37 crc kubenswrapper[4838]: E1128 10:23:37.765943 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7aa37d43f711c860038a25494b0bdb34f668f51ba86237c9da3ed3cd4a53bf50\": container with ID starting with 7aa37d43f711c860038a25494b0bdb34f668f51ba86237c9da3ed3cd4a53bf50 not found: ID does not exist" containerID="7aa37d43f711c860038a25494b0bdb34f668f51ba86237c9da3ed3cd4a53bf50" Nov 28 10:23:37 crc kubenswrapper[4838]: I1128 10:23:37.766083 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7aa37d43f711c860038a25494b0bdb34f668f51ba86237c9da3ed3cd4a53bf50"} err="failed to get container status \"7aa37d43f711c860038a25494b0bdb34f668f51ba86237c9da3ed3cd4a53bf50\": rpc error: code = NotFound desc = could not find container \"7aa37d43f711c860038a25494b0bdb34f668f51ba86237c9da3ed3cd4a53bf50\": container with ID starting with 7aa37d43f711c860038a25494b0bdb34f668f51ba86237c9da3ed3cd4a53bf50 not found: ID does not exist" Nov 28 10:23:38 crc kubenswrapper[4838]: I1128 10:23:38.588254 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bbba9d22-595e-4652-9ad1-3a39be7ddd84" path="/var/lib/kubelet/pods/bbba9d22-595e-4652-9ad1-3a39be7ddd84/volumes" Nov 28 10:23:38 crc kubenswrapper[4838]: I1128 10:23:38.972690 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-lhmbn" Nov 28 10:23:38 crc kubenswrapper[4838]: I1128 10:23:38.973023 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-lhmbn" Nov 28 10:23:39 crc kubenswrapper[4838]: I1128 10:23:39.022045 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-lhmbn" Nov 28 10:23:39 crc kubenswrapper[4838]: I1128 10:23:39.735802 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-lhmbn" Nov 28 10:23:40 crc kubenswrapper[4838]: I1128 10:23:40.232346 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lhmbn"] Nov 28 10:23:41 crc kubenswrapper[4838]: I1128 10:23:41.710596 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-lhmbn" podUID="39ff7a43-9be5-4b9a-a8c1-35273042074e" containerName="registry-server" containerID="cri-o://05de02b8d1ab7d56d92ae65bf4e94e3197ae15ab3806c8ed170ec488915352df" gracePeriod=2 Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.194080 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-lhmbn" Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.288794 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39ff7a43-9be5-4b9a-a8c1-35273042074e-utilities\") pod \"39ff7a43-9be5-4b9a-a8c1-35273042074e\" (UID: \"39ff7a43-9be5-4b9a-a8c1-35273042074e\") " Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.288924 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39ff7a43-9be5-4b9a-a8c1-35273042074e-catalog-content\") pod \"39ff7a43-9be5-4b9a-a8c1-35273042074e\" (UID: \"39ff7a43-9be5-4b9a-a8c1-35273042074e\") " Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.288962 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-whmm4\" (UniqueName: \"kubernetes.io/projected/39ff7a43-9be5-4b9a-a8c1-35273042074e-kube-api-access-whmm4\") pod \"39ff7a43-9be5-4b9a-a8c1-35273042074e\" (UID: \"39ff7a43-9be5-4b9a-a8c1-35273042074e\") " Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.289687 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39ff7a43-9be5-4b9a-a8c1-35273042074e-utilities" (OuterVolumeSpecName: "utilities") pod "39ff7a43-9be5-4b9a-a8c1-35273042074e" (UID: "39ff7a43-9be5-4b9a-a8c1-35273042074e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.298497 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39ff7a43-9be5-4b9a-a8c1-35273042074e-kube-api-access-whmm4" (OuterVolumeSpecName: "kube-api-access-whmm4") pod "39ff7a43-9be5-4b9a-a8c1-35273042074e" (UID: "39ff7a43-9be5-4b9a-a8c1-35273042074e"). InnerVolumeSpecName "kube-api-access-whmm4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.360803 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39ff7a43-9be5-4b9a-a8c1-35273042074e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "39ff7a43-9be5-4b9a-a8c1-35273042074e" (UID: "39ff7a43-9be5-4b9a-a8c1-35273042074e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.390791 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39ff7a43-9be5-4b9a-a8c1-35273042074e-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.390818 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39ff7a43-9be5-4b9a-a8c1-35273042074e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.390827 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-whmm4\" (UniqueName: \"kubernetes.io/projected/39ff7a43-9be5-4b9a-a8c1-35273042074e-kube-api-access-whmm4\") on node \"crc\" DevicePath \"\"" Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.728668 4838 generic.go:334] "Generic (PLEG): container finished" podID="39ff7a43-9be5-4b9a-a8c1-35273042074e" containerID="05de02b8d1ab7d56d92ae65bf4e94e3197ae15ab3806c8ed170ec488915352df" exitCode=0 Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.728742 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lhmbn" event={"ID":"39ff7a43-9be5-4b9a-a8c1-35273042074e","Type":"ContainerDied","Data":"05de02b8d1ab7d56d92ae65bf4e94e3197ae15ab3806c8ed170ec488915352df"} Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.728775 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lhmbn" event={"ID":"39ff7a43-9be5-4b9a-a8c1-35273042074e","Type":"ContainerDied","Data":"fe6e5d7a1644c7dfbc9f924cf77255a3839aaa0378e39700f15c0311709f153d"} Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.728798 4838 scope.go:117] "RemoveContainer" containerID="05de02b8d1ab7d56d92ae65bf4e94e3197ae15ab3806c8ed170ec488915352df" Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.728923 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-lhmbn" Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.774679 4838 scope.go:117] "RemoveContainer" containerID="482a8b22cd99fc91f9ab736de82a6893a2c9584d4f8dc1658e1ee814dae0dd94" Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.780476 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lhmbn"] Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.810142 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-lhmbn"] Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.820305 4838 scope.go:117] "RemoveContainer" containerID="375af4270e882c04caabc51188297bca24b88be09aa6984e46a42cf3cee09b93" Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.852640 4838 scope.go:117] "RemoveContainer" containerID="05de02b8d1ab7d56d92ae65bf4e94e3197ae15ab3806c8ed170ec488915352df" Nov 28 10:23:42 crc kubenswrapper[4838]: E1128 10:23:42.853105 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05de02b8d1ab7d56d92ae65bf4e94e3197ae15ab3806c8ed170ec488915352df\": container with ID starting with 05de02b8d1ab7d56d92ae65bf4e94e3197ae15ab3806c8ed170ec488915352df not found: ID does not exist" containerID="05de02b8d1ab7d56d92ae65bf4e94e3197ae15ab3806c8ed170ec488915352df" Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.853146 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05de02b8d1ab7d56d92ae65bf4e94e3197ae15ab3806c8ed170ec488915352df"} err="failed to get container status \"05de02b8d1ab7d56d92ae65bf4e94e3197ae15ab3806c8ed170ec488915352df\": rpc error: code = NotFound desc = could not find container \"05de02b8d1ab7d56d92ae65bf4e94e3197ae15ab3806c8ed170ec488915352df\": container with ID starting with 05de02b8d1ab7d56d92ae65bf4e94e3197ae15ab3806c8ed170ec488915352df not found: ID does not exist" Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.853171 4838 scope.go:117] "RemoveContainer" containerID="482a8b22cd99fc91f9ab736de82a6893a2c9584d4f8dc1658e1ee814dae0dd94" Nov 28 10:23:42 crc kubenswrapper[4838]: E1128 10:23:42.853538 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"482a8b22cd99fc91f9ab736de82a6893a2c9584d4f8dc1658e1ee814dae0dd94\": container with ID starting with 482a8b22cd99fc91f9ab736de82a6893a2c9584d4f8dc1658e1ee814dae0dd94 not found: ID does not exist" containerID="482a8b22cd99fc91f9ab736de82a6893a2c9584d4f8dc1658e1ee814dae0dd94" Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.853591 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"482a8b22cd99fc91f9ab736de82a6893a2c9584d4f8dc1658e1ee814dae0dd94"} err="failed to get container status \"482a8b22cd99fc91f9ab736de82a6893a2c9584d4f8dc1658e1ee814dae0dd94\": rpc error: code = NotFound desc = could not find container \"482a8b22cd99fc91f9ab736de82a6893a2c9584d4f8dc1658e1ee814dae0dd94\": container with ID starting with 482a8b22cd99fc91f9ab736de82a6893a2c9584d4f8dc1658e1ee814dae0dd94 not found: ID does not exist" Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.853610 4838 scope.go:117] "RemoveContainer" containerID="375af4270e882c04caabc51188297bca24b88be09aa6984e46a42cf3cee09b93" Nov 28 10:23:42 crc kubenswrapper[4838]: E1128 10:23:42.853989 4838 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"375af4270e882c04caabc51188297bca24b88be09aa6984e46a42cf3cee09b93\": container with ID starting with 375af4270e882c04caabc51188297bca24b88be09aa6984e46a42cf3cee09b93 not found: ID does not exist" containerID="375af4270e882c04caabc51188297bca24b88be09aa6984e46a42cf3cee09b93" Nov 28 10:23:42 crc kubenswrapper[4838]: I1128 10:23:42.854023 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"375af4270e882c04caabc51188297bca24b88be09aa6984e46a42cf3cee09b93"} err="failed to get container status \"375af4270e882c04caabc51188297bca24b88be09aa6984e46a42cf3cee09b93\": rpc error: code = NotFound desc = could not find container \"375af4270e882c04caabc51188297bca24b88be09aa6984e46a42cf3cee09b93\": container with ID starting with 375af4270e882c04caabc51188297bca24b88be09aa6984e46a42cf3cee09b93 not found: ID does not exist" Nov 28 10:23:44 crc kubenswrapper[4838]: I1128 10:23:44.575877 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39ff7a43-9be5-4b9a-a8c1-35273042074e" path="/var/lib/kubelet/pods/39ff7a43-9be5-4b9a-a8c1-35273042074e/volumes" Nov 28 10:24:31 crc kubenswrapper[4838]: I1128 10:24:31.106041 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8qzh9"] Nov 28 10:24:31 crc kubenswrapper[4838]: E1128 10:24:31.107364 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39ff7a43-9be5-4b9a-a8c1-35273042074e" containerName="extract-utilities" Nov 28 10:24:31 crc kubenswrapper[4838]: I1128 10:24:31.107389 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="39ff7a43-9be5-4b9a-a8c1-35273042074e" containerName="extract-utilities" Nov 28 10:24:31 crc kubenswrapper[4838]: E1128 10:24:31.107410 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39ff7a43-9be5-4b9a-a8c1-35273042074e" containerName="registry-server" Nov 28 10:24:31 crc kubenswrapper[4838]: I1128 10:24:31.107423 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="39ff7a43-9be5-4b9a-a8c1-35273042074e" containerName="registry-server" Nov 28 10:24:31 crc kubenswrapper[4838]: E1128 10:24:31.107442 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbba9d22-595e-4652-9ad1-3a39be7ddd84" containerName="registry-server" Nov 28 10:24:31 crc kubenswrapper[4838]: I1128 10:24:31.107453 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbba9d22-595e-4652-9ad1-3a39be7ddd84" containerName="registry-server" Nov 28 10:24:31 crc kubenswrapper[4838]: E1128 10:24:31.107476 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbba9d22-595e-4652-9ad1-3a39be7ddd84" containerName="extract-utilities" Nov 28 10:24:31 crc kubenswrapper[4838]: I1128 10:24:31.107487 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbba9d22-595e-4652-9ad1-3a39be7ddd84" containerName="extract-utilities" Nov 28 10:24:31 crc kubenswrapper[4838]: E1128 10:24:31.107509 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbba9d22-595e-4652-9ad1-3a39be7ddd84" containerName="extract-content" Nov 28 10:24:31 crc kubenswrapper[4838]: I1128 10:24:31.107519 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbba9d22-595e-4652-9ad1-3a39be7ddd84" containerName="extract-content" Nov 28 10:24:31 crc kubenswrapper[4838]: E1128 10:24:31.107536 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39ff7a43-9be5-4b9a-a8c1-35273042074e" 
containerName="extract-content" Nov 28 10:24:31 crc kubenswrapper[4838]: I1128 10:24:31.107546 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="39ff7a43-9be5-4b9a-a8c1-35273042074e" containerName="extract-content" Nov 28 10:24:31 crc kubenswrapper[4838]: I1128 10:24:31.107856 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="bbba9d22-595e-4652-9ad1-3a39be7ddd84" containerName="registry-server" Nov 28 10:24:31 crc kubenswrapper[4838]: I1128 10:24:31.107904 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="39ff7a43-9be5-4b9a-a8c1-35273042074e" containerName="registry-server" Nov 28 10:24:31 crc kubenswrapper[4838]: I1128 10:24:31.110108 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8qzh9" Nov 28 10:24:31 crc kubenswrapper[4838]: I1128 10:24:31.120585 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8qzh9"] Nov 28 10:24:31 crc kubenswrapper[4838]: I1128 10:24:31.197217 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n68vt\" (UniqueName: \"kubernetes.io/projected/71da78fb-d6e2-460b-9b1f-54b2c6c9c33b-kube-api-access-n68vt\") pod \"certified-operators-8qzh9\" (UID: \"71da78fb-d6e2-460b-9b1f-54b2c6c9c33b\") " pod="openshift-marketplace/certified-operators-8qzh9" Nov 28 10:24:31 crc kubenswrapper[4838]: I1128 10:24:31.197309 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71da78fb-d6e2-460b-9b1f-54b2c6c9c33b-catalog-content\") pod \"certified-operators-8qzh9\" (UID: \"71da78fb-d6e2-460b-9b1f-54b2c6c9c33b\") " pod="openshift-marketplace/certified-operators-8qzh9" Nov 28 10:24:31 crc kubenswrapper[4838]: I1128 10:24:31.197461 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71da78fb-d6e2-460b-9b1f-54b2c6c9c33b-utilities\") pod \"certified-operators-8qzh9\" (UID: \"71da78fb-d6e2-460b-9b1f-54b2c6c9c33b\") " pod="openshift-marketplace/certified-operators-8qzh9" Nov 28 10:24:31 crc kubenswrapper[4838]: I1128 10:24:31.299298 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n68vt\" (UniqueName: \"kubernetes.io/projected/71da78fb-d6e2-460b-9b1f-54b2c6c9c33b-kube-api-access-n68vt\") pod \"certified-operators-8qzh9\" (UID: \"71da78fb-d6e2-460b-9b1f-54b2c6c9c33b\") " pod="openshift-marketplace/certified-operators-8qzh9" Nov 28 10:24:31 crc kubenswrapper[4838]: I1128 10:24:31.299383 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71da78fb-d6e2-460b-9b1f-54b2c6c9c33b-catalog-content\") pod \"certified-operators-8qzh9\" (UID: \"71da78fb-d6e2-460b-9b1f-54b2c6c9c33b\") " pod="openshift-marketplace/certified-operators-8qzh9" Nov 28 10:24:31 crc kubenswrapper[4838]: I1128 10:24:31.299477 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71da78fb-d6e2-460b-9b1f-54b2c6c9c33b-utilities\") pod \"certified-operators-8qzh9\" (UID: \"71da78fb-d6e2-460b-9b1f-54b2c6c9c33b\") " pod="openshift-marketplace/certified-operators-8qzh9" Nov 28 10:24:31 crc kubenswrapper[4838]: I1128 10:24:31.300150 4838 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71da78fb-d6e2-460b-9b1f-54b2c6c9c33b-utilities\") pod \"certified-operators-8qzh9\" (UID: \"71da78fb-d6e2-460b-9b1f-54b2c6c9c33b\") " pod="openshift-marketplace/certified-operators-8qzh9" Nov 28 10:24:31 crc kubenswrapper[4838]: I1128 10:24:31.300202 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71da78fb-d6e2-460b-9b1f-54b2c6c9c33b-catalog-content\") pod \"certified-operators-8qzh9\" (UID: \"71da78fb-d6e2-460b-9b1f-54b2c6c9c33b\") " pod="openshift-marketplace/certified-operators-8qzh9" Nov 28 10:24:31 crc kubenswrapper[4838]: I1128 10:24:31.335709 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n68vt\" (UniqueName: \"kubernetes.io/projected/71da78fb-d6e2-460b-9b1f-54b2c6c9c33b-kube-api-access-n68vt\") pod \"certified-operators-8qzh9\" (UID: \"71da78fb-d6e2-460b-9b1f-54b2c6c9c33b\") " pod="openshift-marketplace/certified-operators-8qzh9" Nov 28 10:24:31 crc kubenswrapper[4838]: I1128 10:24:31.446238 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8qzh9" Nov 28 10:24:31 crc kubenswrapper[4838]: I1128 10:24:31.944033 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8qzh9"] Nov 28 10:24:32 crc kubenswrapper[4838]: I1128 10:24:32.327501 4838 generic.go:334] "Generic (PLEG): container finished" podID="71da78fb-d6e2-460b-9b1f-54b2c6c9c33b" containerID="04f2954c3a931aa728b3c914f1a50d73aaf37c8ffc0b7ae04d32cf818b601bd8" exitCode=0 Nov 28 10:24:32 crc kubenswrapper[4838]: I1128 10:24:32.327542 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8qzh9" event={"ID":"71da78fb-d6e2-460b-9b1f-54b2c6c9c33b","Type":"ContainerDied","Data":"04f2954c3a931aa728b3c914f1a50d73aaf37c8ffc0b7ae04d32cf818b601bd8"} Nov 28 10:24:32 crc kubenswrapper[4838]: I1128 10:24:32.327565 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8qzh9" event={"ID":"71da78fb-d6e2-460b-9b1f-54b2c6c9c33b","Type":"ContainerStarted","Data":"6849d7be49ab621d8850d7c5f18add85573915836c5183245fa27524be5fe560"} Nov 28 10:24:34 crc kubenswrapper[4838]: I1128 10:24:34.349883 4838 generic.go:334] "Generic (PLEG): container finished" podID="4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0" containerID="d17fdebf7ddf7bf9d073a969b6fa6c85acd4d54a509d6cb4c67f1dad2fec6a0a" exitCode=0 Nov 28 10:24:34 crc kubenswrapper[4838]: I1128 10:24:34.349949 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg" event={"ID":"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0","Type":"ContainerDied","Data":"d17fdebf7ddf7bf9d073a969b6fa6c85acd4d54a509d6cb4c67f1dad2fec6a0a"} Nov 28 10:24:34 crc kubenswrapper[4838]: I1128 10:24:34.352762 4838 generic.go:334] "Generic (PLEG): container finished" podID="71da78fb-d6e2-460b-9b1f-54b2c6c9c33b" containerID="70b74241048310c2760b716793739ac75306194738f917ca35aa39dda6ed4098" exitCode=0 Nov 28 10:24:34 crc kubenswrapper[4838]: I1128 10:24:34.352801 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8qzh9" event={"ID":"71da78fb-d6e2-460b-9b1f-54b2c6c9c33b","Type":"ContainerDied","Data":"70b74241048310c2760b716793739ac75306194738f917ca35aa39dda6ed4098"} Nov 28 10:24:35 crc kubenswrapper[4838]: I1128 10:24:35.373198 4838 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8qzh9" event={"ID":"71da78fb-d6e2-460b-9b1f-54b2c6c9c33b","Type":"ContainerStarted","Data":"e286faddb64ff53d76c13d35787965c4ddc0b66ff51c43693c41462310fee56b"} Nov 28 10:24:35 crc kubenswrapper[4838]: I1128 10:24:35.406915 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8qzh9" podStartSLOduration=1.895722409 podStartE2EDuration="4.406885997s" podCreationTimestamp="2025-11-28 10:24:31 +0000 UTC" firstStartedPulling="2025-11-28 10:24:32.329120053 +0000 UTC m=+1644.028094223" lastFinishedPulling="2025-11-28 10:24:34.840283641 +0000 UTC m=+1646.539257811" observedRunningTime="2025-11-28 10:24:35.397296409 +0000 UTC m=+1647.096270589" watchObservedRunningTime="2025-11-28 10:24:35.406885997 +0000 UTC m=+1647.105860177" Nov 28 10:24:35 crc kubenswrapper[4838]: I1128 10:24:35.844393 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg" Nov 28 10:24:35 crc kubenswrapper[4838]: I1128 10:24:35.888414 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-bootstrap-combined-ca-bundle\") pod \"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0\" (UID: \"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0\") " Nov 28 10:24:35 crc kubenswrapper[4838]: I1128 10:24:35.888516 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xw6g5\" (UniqueName: \"kubernetes.io/projected/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-kube-api-access-xw6g5\") pod \"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0\" (UID: \"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0\") " Nov 28 10:24:35 crc kubenswrapper[4838]: I1128 10:24:35.888538 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-inventory\") pod \"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0\" (UID: \"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0\") " Nov 28 10:24:35 crc kubenswrapper[4838]: I1128 10:24:35.888647 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-ssh-key\") pod \"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0\" (UID: \"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0\") " Nov 28 10:24:35 crc kubenswrapper[4838]: I1128 10:24:35.894438 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-kube-api-access-xw6g5" (OuterVolumeSpecName: "kube-api-access-xw6g5") pod "4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0" (UID: "4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0"). InnerVolumeSpecName "kube-api-access-xw6g5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:24:35 crc kubenswrapper[4838]: I1128 10:24:35.896501 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0" (UID: "4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:24:35 crc kubenswrapper[4838]: I1128 10:24:35.923597 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-inventory" (OuterVolumeSpecName: "inventory") pod "4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0" (UID: "4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:24:35 crc kubenswrapper[4838]: I1128 10:24:35.925051 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0" (UID: "4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:24:35 crc kubenswrapper[4838]: I1128 10:24:35.990778 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xw6g5\" (UniqueName: \"kubernetes.io/projected/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-kube-api-access-xw6g5\") on node \"crc\" DevicePath \"\"" Nov 28 10:24:35 crc kubenswrapper[4838]: I1128 10:24:35.990835 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 10:24:35 crc kubenswrapper[4838]: I1128 10:24:35.990852 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:24:35 crc kubenswrapper[4838]: I1128 10:24:35.990874 4838 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:24:36 crc kubenswrapper[4838]: I1128 10:24:36.381766 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg" event={"ID":"4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0","Type":"ContainerDied","Data":"6f055b39bba8d82c73b609389d00348b30a9b5ee369bf5d10c14af8f1bd67aca"} Nov 28 10:24:36 crc kubenswrapper[4838]: I1128 10:24:36.381826 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6f055b39bba8d82c73b609389d00348b30a9b5ee369bf5d10c14af8f1bd67aca" Nov 28 10:24:36 crc kubenswrapper[4838]: I1128 10:24:36.381782 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg" Nov 28 10:24:36 crc kubenswrapper[4838]: I1128 10:24:36.471265 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs"] Nov 28 10:24:36 crc kubenswrapper[4838]: E1128 10:24:36.471585 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 28 10:24:36 crc kubenswrapper[4838]: I1128 10:24:36.471601 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 28 10:24:36 crc kubenswrapper[4838]: I1128 10:24:36.471791 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 28 10:24:36 crc kubenswrapper[4838]: I1128 10:24:36.472357 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs" Nov 28 10:24:36 crc kubenswrapper[4838]: I1128 10:24:36.474260 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 10:24:36 crc kubenswrapper[4838]: I1128 10:24:36.474479 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 10:24:36 crc kubenswrapper[4838]: I1128 10:24:36.474672 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 10:24:36 crc kubenswrapper[4838]: I1128 10:24:36.474845 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn" Nov 28 10:24:36 crc kubenswrapper[4838]: I1128 10:24:36.486226 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs"] Nov 28 10:24:36 crc kubenswrapper[4838]: I1128 10:24:36.624424 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6nxc\" (UniqueName: \"kubernetes.io/projected/8dfef46a-ee03-4662-b64d-c906a0c8759e-kube-api-access-v6nxc\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-58bxs\" (UID: \"8dfef46a-ee03-4662-b64d-c906a0c8759e\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs" Nov 28 10:24:36 crc kubenswrapper[4838]: I1128 10:24:36.624592 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8dfef46a-ee03-4662-b64d-c906a0c8759e-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-58bxs\" (UID: \"8dfef46a-ee03-4662-b64d-c906a0c8759e\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs" Nov 28 10:24:36 crc kubenswrapper[4838]: I1128 10:24:36.624958 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8dfef46a-ee03-4662-b64d-c906a0c8759e-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-58bxs\" (UID: \"8dfef46a-ee03-4662-b64d-c906a0c8759e\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs" Nov 28 10:24:36 crc kubenswrapper[4838]: I1128 
10:24:36.726279 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8dfef46a-ee03-4662-b64d-c906a0c8759e-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-58bxs\" (UID: \"8dfef46a-ee03-4662-b64d-c906a0c8759e\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs" Nov 28 10:24:36 crc kubenswrapper[4838]: I1128 10:24:36.726401 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8dfef46a-ee03-4662-b64d-c906a0c8759e-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-58bxs\" (UID: \"8dfef46a-ee03-4662-b64d-c906a0c8759e\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs" Nov 28 10:24:36 crc kubenswrapper[4838]: I1128 10:24:36.726468 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6nxc\" (UniqueName: \"kubernetes.io/projected/8dfef46a-ee03-4662-b64d-c906a0c8759e-kube-api-access-v6nxc\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-58bxs\" (UID: \"8dfef46a-ee03-4662-b64d-c906a0c8759e\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs" Nov 28 10:24:36 crc kubenswrapper[4838]: I1128 10:24:36.729929 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8dfef46a-ee03-4662-b64d-c906a0c8759e-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-58bxs\" (UID: \"8dfef46a-ee03-4662-b64d-c906a0c8759e\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs" Nov 28 10:24:36 crc kubenswrapper[4838]: I1128 10:24:36.730564 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8dfef46a-ee03-4662-b64d-c906a0c8759e-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-58bxs\" (UID: \"8dfef46a-ee03-4662-b64d-c906a0c8759e\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs" Nov 28 10:24:36 crc kubenswrapper[4838]: I1128 10:24:36.744490 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6nxc\" (UniqueName: \"kubernetes.io/projected/8dfef46a-ee03-4662-b64d-c906a0c8759e-kube-api-access-v6nxc\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-58bxs\" (UID: \"8dfef46a-ee03-4662-b64d-c906a0c8759e\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs" Nov 28 10:24:36 crc kubenswrapper[4838]: I1128 10:24:36.837541 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs" Nov 28 10:24:37 crc kubenswrapper[4838]: I1128 10:24:37.447214 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs"] Nov 28 10:24:38 crc kubenswrapper[4838]: I1128 10:24:38.406148 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs" event={"ID":"8dfef46a-ee03-4662-b64d-c906a0c8759e","Type":"ContainerStarted","Data":"5b2baf7ce903c5ccbd42e05cc2c897c16f19a24844e1be9946e346db898df003"} Nov 28 10:24:38 crc kubenswrapper[4838]: I1128 10:24:38.406496 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs" event={"ID":"8dfef46a-ee03-4662-b64d-c906a0c8759e","Type":"ContainerStarted","Data":"d0c2dcc08e10c992f17bb67337da4a6cf4bc63aa097911dfb800a00d0972c633"} Nov 28 10:24:38 crc kubenswrapper[4838]: I1128 10:24:38.424338 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs" podStartSLOduration=1.732683529 podStartE2EDuration="2.424313848s" podCreationTimestamp="2025-11-28 10:24:36 +0000 UTC" firstStartedPulling="2025-11-28 10:24:37.450007923 +0000 UTC m=+1649.148982103" lastFinishedPulling="2025-11-28 10:24:38.141638242 +0000 UTC m=+1649.840612422" observedRunningTime="2025-11-28 10:24:38.421717168 +0000 UTC m=+1650.120691338" watchObservedRunningTime="2025-11-28 10:24:38.424313848 +0000 UTC m=+1650.123288048" Nov 28 10:24:41 crc kubenswrapper[4838]: I1128 10:24:41.446774 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-8qzh9" Nov 28 10:24:41 crc kubenswrapper[4838]: I1128 10:24:41.447378 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-8qzh9" Nov 28 10:24:41 crc kubenswrapper[4838]: I1128 10:24:41.556194 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-8qzh9" Nov 28 10:24:42 crc kubenswrapper[4838]: I1128 10:24:42.526831 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8qzh9" Nov 28 10:24:42 crc kubenswrapper[4838]: I1128 10:24:42.586311 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8qzh9"] Nov 28 10:24:44 crc kubenswrapper[4838]: I1128 10:24:44.470236 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-8qzh9" podUID="71da78fb-d6e2-460b-9b1f-54b2c6c9c33b" containerName="registry-server" containerID="cri-o://e286faddb64ff53d76c13d35787965c4ddc0b66ff51c43693c41462310fee56b" gracePeriod=2 Nov 28 10:24:45 crc kubenswrapper[4838]: I1128 10:24:45.482542 4838 generic.go:334] "Generic (PLEG): container finished" podID="71da78fb-d6e2-460b-9b1f-54b2c6c9c33b" containerID="e286faddb64ff53d76c13d35787965c4ddc0b66ff51c43693c41462310fee56b" exitCode=0 Nov 28 10:24:45 crc kubenswrapper[4838]: I1128 10:24:45.482637 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8qzh9" event={"ID":"71da78fb-d6e2-460b-9b1f-54b2c6c9c33b","Type":"ContainerDied","Data":"e286faddb64ff53d76c13d35787965c4ddc0b66ff51c43693c41462310fee56b"} Nov 28 10:24:45 crc 
kubenswrapper[4838]: I1128 10:24:45.483757 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8qzh9" event={"ID":"71da78fb-d6e2-460b-9b1f-54b2c6c9c33b","Type":"ContainerDied","Data":"6849d7be49ab621d8850d7c5f18add85573915836c5183245fa27524be5fe560"} Nov 28 10:24:45 crc kubenswrapper[4838]: I1128 10:24:45.483893 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6849d7be49ab621d8850d7c5f18add85573915836c5183245fa27524be5fe560" Nov 28 10:24:45 crc kubenswrapper[4838]: I1128 10:24:45.499115 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8qzh9" Nov 28 10:24:45 crc kubenswrapper[4838]: I1128 10:24:45.620760 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n68vt\" (UniqueName: \"kubernetes.io/projected/71da78fb-d6e2-460b-9b1f-54b2c6c9c33b-kube-api-access-n68vt\") pod \"71da78fb-d6e2-460b-9b1f-54b2c6c9c33b\" (UID: \"71da78fb-d6e2-460b-9b1f-54b2c6c9c33b\") " Nov 28 10:24:45 crc kubenswrapper[4838]: I1128 10:24:45.620829 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71da78fb-d6e2-460b-9b1f-54b2c6c9c33b-catalog-content\") pod \"71da78fb-d6e2-460b-9b1f-54b2c6c9c33b\" (UID: \"71da78fb-d6e2-460b-9b1f-54b2c6c9c33b\") " Nov 28 10:24:45 crc kubenswrapper[4838]: I1128 10:24:45.620875 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71da78fb-d6e2-460b-9b1f-54b2c6c9c33b-utilities\") pod \"71da78fb-d6e2-460b-9b1f-54b2c6c9c33b\" (UID: \"71da78fb-d6e2-460b-9b1f-54b2c6c9c33b\") " Nov 28 10:24:45 crc kubenswrapper[4838]: I1128 10:24:45.622041 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71da78fb-d6e2-460b-9b1f-54b2c6c9c33b-utilities" (OuterVolumeSpecName: "utilities") pod "71da78fb-d6e2-460b-9b1f-54b2c6c9c33b" (UID: "71da78fb-d6e2-460b-9b1f-54b2c6c9c33b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:24:45 crc kubenswrapper[4838]: I1128 10:24:45.628757 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71da78fb-d6e2-460b-9b1f-54b2c6c9c33b-kube-api-access-n68vt" (OuterVolumeSpecName: "kube-api-access-n68vt") pod "71da78fb-d6e2-460b-9b1f-54b2c6c9c33b" (UID: "71da78fb-d6e2-460b-9b1f-54b2c6c9c33b"). InnerVolumeSpecName "kube-api-access-n68vt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:24:45 crc kubenswrapper[4838]: I1128 10:24:45.662898 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71da78fb-d6e2-460b-9b1f-54b2c6c9c33b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "71da78fb-d6e2-460b-9b1f-54b2c6c9c33b" (UID: "71da78fb-d6e2-460b-9b1f-54b2c6c9c33b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:24:45 crc kubenswrapper[4838]: I1128 10:24:45.724298 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n68vt\" (UniqueName: \"kubernetes.io/projected/71da78fb-d6e2-460b-9b1f-54b2c6c9c33b-kube-api-access-n68vt\") on node \"crc\" DevicePath \"\"" Nov 28 10:24:45 crc kubenswrapper[4838]: I1128 10:24:45.724339 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71da78fb-d6e2-460b-9b1f-54b2c6c9c33b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:24:45 crc kubenswrapper[4838]: I1128 10:24:45.724352 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71da78fb-d6e2-460b-9b1f-54b2c6c9c33b-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:24:46 crc kubenswrapper[4838]: I1128 10:24:46.504373 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8qzh9" Nov 28 10:24:46 crc kubenswrapper[4838]: I1128 10:24:46.590331 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8qzh9"] Nov 28 10:24:46 crc kubenswrapper[4838]: I1128 10:24:46.599227 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-8qzh9"] Nov 28 10:24:48 crc kubenswrapper[4838]: I1128 10:24:48.576111 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="71da78fb-d6e2-460b-9b1f-54b2c6c9c33b" path="/var/lib/kubelet/pods/71da78fb-d6e2-460b-9b1f-54b2c6c9c33b/volumes" Nov 28 10:24:53 crc kubenswrapper[4838]: I1128 10:24:53.939926 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:24:53 crc kubenswrapper[4838]: I1128 10:24:53.940582 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:25:23 crc kubenswrapper[4838]: I1128 10:25:23.940280 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:25:23 crc kubenswrapper[4838]: I1128 10:25:23.940947 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:25:36 crc kubenswrapper[4838]: I1128 10:25:36.068869 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-4158-account-create-update-qzc2x"] Nov 28 10:25:36 crc kubenswrapper[4838]: I1128 10:25:36.086856 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-bhznq"] Nov 28 10:25:36 crc kubenswrapper[4838]: I1128 
10:25:36.097666 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-4158-account-create-update-qzc2x"] Nov 28 10:25:36 crc kubenswrapper[4838]: I1128 10:25:36.112657 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-bhznq"] Nov 28 10:25:36 crc kubenswrapper[4838]: I1128 10:25:36.577468 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e2876f4-8898-4b65-b2e2-98eb5635ddd6" path="/var/lib/kubelet/pods/1e2876f4-8898-4b65-b2e2-98eb5635ddd6/volumes" Nov 28 10:25:36 crc kubenswrapper[4838]: I1128 10:25:36.580380 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ed1c826-381e-40f2-bf20-470d6ceca80d" path="/var/lib/kubelet/pods/5ed1c826-381e-40f2-bf20-470d6ceca80d/volumes" Nov 28 10:25:47 crc kubenswrapper[4838]: I1128 10:25:47.067654 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-6815-account-create-update-kvdfv"] Nov 28 10:25:47 crc kubenswrapper[4838]: I1128 10:25:47.077876 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-g727w"] Nov 28 10:25:47 crc kubenswrapper[4838]: I1128 10:25:47.088146 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-ph2vv"] Nov 28 10:25:47 crc kubenswrapper[4838]: I1128 10:25:47.095119 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-6815-account-create-update-kvdfv"] Nov 28 10:25:47 crc kubenswrapper[4838]: I1128 10:25:47.101476 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-g727w"] Nov 28 10:25:47 crc kubenswrapper[4838]: I1128 10:25:47.107960 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-a5d1-account-create-update-2m49r"] Nov 28 10:25:47 crc kubenswrapper[4838]: I1128 10:25:47.114116 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-ph2vv"] Nov 28 10:25:47 crc kubenswrapper[4838]: I1128 10:25:47.120085 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-a5d1-account-create-update-2m49r"] Nov 28 10:25:48 crc kubenswrapper[4838]: I1128 10:25:48.581656 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8769e7d0-1a95-40da-b0e7-91cbc81f94e1" path="/var/lib/kubelet/pods/8769e7d0-1a95-40da-b0e7-91cbc81f94e1/volumes" Nov 28 10:25:48 crc kubenswrapper[4838]: I1128 10:25:48.583198 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f0c6e8c-ce32-4db4-b903-d56ec1fec884" path="/var/lib/kubelet/pods/8f0c6e8c-ce32-4db4-b903-d56ec1fec884/volumes" Nov 28 10:25:48 crc kubenswrapper[4838]: I1128 10:25:48.584284 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e" path="/var/lib/kubelet/pods/a37aacc0-2d3e-4a20-abd2-3f0bcd87ab7e/volumes" Nov 28 10:25:48 crc kubenswrapper[4838]: I1128 10:25:48.596662 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b120d6f5-29b7-4dc8-9457-e908a8e4d6f0" path="/var/lib/kubelet/pods/b120d6f5-29b7-4dc8-9457-e908a8e4d6f0/volumes" Nov 28 10:25:53 crc kubenswrapper[4838]: I1128 10:25:53.940524 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:25:53 crc kubenswrapper[4838]: I1128 10:25:53.941321 4838 
prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:25:53 crc kubenswrapper[4838]: I1128 10:25:53.941386 4838 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 10:25:53 crc kubenswrapper[4838]: I1128 10:25:53.942514 4838 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab"} pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 10:25:53 crc kubenswrapper[4838]: I1128 10:25:53.942623 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" containerID="cri-o://94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab" gracePeriod=600 Nov 28 10:25:54 crc kubenswrapper[4838]: E1128 10:25:54.084848 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:25:54 crc kubenswrapper[4838]: I1128 10:25:54.262674 4838 generic.go:334] "Generic (PLEG): container finished" podID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab" exitCode=0 Nov 28 10:25:54 crc kubenswrapper[4838]: I1128 10:25:54.262746 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerDied","Data":"94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab"} Nov 28 10:25:54 crc kubenswrapper[4838]: I1128 10:25:54.262786 4838 scope.go:117] "RemoveContainer" containerID="37eb1fd382bf7ac855fc0bf19ecaa14a9b60925b9775992a84755c27a44467c5" Nov 28 10:25:54 crc kubenswrapper[4838]: I1128 10:25:54.263588 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab" Nov 28 10:25:54 crc kubenswrapper[4838]: E1128 10:25:54.263892 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:25:55 crc kubenswrapper[4838]: I1128 10:25:55.277249 4838 generic.go:334] "Generic (PLEG): container finished" podID="8dfef46a-ee03-4662-b64d-c906a0c8759e" containerID="5b2baf7ce903c5ccbd42e05cc2c897c16f19a24844e1be9946e346db898df003" exitCode=0 Nov 
28 10:25:55 crc kubenswrapper[4838]: I1128 10:25:55.277355 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs" event={"ID":"8dfef46a-ee03-4662-b64d-c906a0c8759e","Type":"ContainerDied","Data":"5b2baf7ce903c5ccbd42e05cc2c897c16f19a24844e1be9946e346db898df003"} Nov 28 10:25:56 crc kubenswrapper[4838]: I1128 10:25:56.824221 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs" Nov 28 10:25:56 crc kubenswrapper[4838]: I1128 10:25:56.909654 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8dfef46a-ee03-4662-b64d-c906a0c8759e-ssh-key\") pod \"8dfef46a-ee03-4662-b64d-c906a0c8759e\" (UID: \"8dfef46a-ee03-4662-b64d-c906a0c8759e\") " Nov 28 10:25:56 crc kubenswrapper[4838]: I1128 10:25:56.909854 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8dfef46a-ee03-4662-b64d-c906a0c8759e-inventory\") pod \"8dfef46a-ee03-4662-b64d-c906a0c8759e\" (UID: \"8dfef46a-ee03-4662-b64d-c906a0c8759e\") " Nov 28 10:25:56 crc kubenswrapper[4838]: I1128 10:25:56.909923 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v6nxc\" (UniqueName: \"kubernetes.io/projected/8dfef46a-ee03-4662-b64d-c906a0c8759e-kube-api-access-v6nxc\") pod \"8dfef46a-ee03-4662-b64d-c906a0c8759e\" (UID: \"8dfef46a-ee03-4662-b64d-c906a0c8759e\") " Nov 28 10:25:56 crc kubenswrapper[4838]: I1128 10:25:56.914630 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8dfef46a-ee03-4662-b64d-c906a0c8759e-kube-api-access-v6nxc" (OuterVolumeSpecName: "kube-api-access-v6nxc") pod "8dfef46a-ee03-4662-b64d-c906a0c8759e" (UID: "8dfef46a-ee03-4662-b64d-c906a0c8759e"). InnerVolumeSpecName "kube-api-access-v6nxc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:25:56 crc kubenswrapper[4838]: I1128 10:25:56.934625 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8dfef46a-ee03-4662-b64d-c906a0c8759e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8dfef46a-ee03-4662-b64d-c906a0c8759e" (UID: "8dfef46a-ee03-4662-b64d-c906a0c8759e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:25:56 crc kubenswrapper[4838]: I1128 10:25:56.944307 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8dfef46a-ee03-4662-b64d-c906a0c8759e-inventory" (OuterVolumeSpecName: "inventory") pod "8dfef46a-ee03-4662-b64d-c906a0c8759e" (UID: "8dfef46a-ee03-4662-b64d-c906a0c8759e"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.015861 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8dfef46a-ee03-4662-b64d-c906a0c8759e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.015930 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8dfef46a-ee03-4662-b64d-c906a0c8759e-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.015946 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v6nxc\" (UniqueName: \"kubernetes.io/projected/8dfef46a-ee03-4662-b64d-c906a0c8759e-kube-api-access-v6nxc\") on node \"crc\" DevicePath \"\"" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.307510 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs" event={"ID":"8dfef46a-ee03-4662-b64d-c906a0c8759e","Type":"ContainerDied","Data":"d0c2dcc08e10c992f17bb67337da4a6cf4bc63aa097911dfb800a00d0972c633"} Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.307560 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d0c2dcc08e10c992f17bb67337da4a6cf4bc63aa097911dfb800a00d0972c633" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.307635 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.420331 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj"] Nov 28 10:25:57 crc kubenswrapper[4838]: E1128 10:25:57.420981 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71da78fb-d6e2-460b-9b1f-54b2c6c9c33b" containerName="extract-utilities" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.421010 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="71da78fb-d6e2-460b-9b1f-54b2c6c9c33b" containerName="extract-utilities" Nov 28 10:25:57 crc kubenswrapper[4838]: E1128 10:25:57.421065 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71da78fb-d6e2-460b-9b1f-54b2c6c9c33b" containerName="registry-server" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.421079 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="71da78fb-d6e2-460b-9b1f-54b2c6c9c33b" containerName="registry-server" Nov 28 10:25:57 crc kubenswrapper[4838]: E1128 10:25:57.421112 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8dfef46a-ee03-4662-b64d-c906a0c8759e" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.421128 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="8dfef46a-ee03-4662-b64d-c906a0c8759e" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 28 10:25:57 crc kubenswrapper[4838]: E1128 10:25:57.421150 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71da78fb-d6e2-460b-9b1f-54b2c6c9c33b" containerName="extract-content" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.421162 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="71da78fb-d6e2-460b-9b1f-54b2c6c9c33b" containerName="extract-content" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.421461 4838 
memory_manager.go:354] "RemoveStaleState removing state" podUID="8dfef46a-ee03-4662-b64d-c906a0c8759e" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.421502 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="71da78fb-d6e2-460b-9b1f-54b2c6c9c33b" containerName="registry-server" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.422486 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.432447 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.432522 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.433146 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.434485 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.441665 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj"] Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.525977 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/86cb5779-8a22-4602-8bda-e9f2fb2cb78d-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-5skrj\" (UID: \"86cb5779-8a22-4602-8bda-e9f2fb2cb78d\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.526081 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/86cb5779-8a22-4602-8bda-e9f2fb2cb78d-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-5skrj\" (UID: \"86cb5779-8a22-4602-8bda-e9f2fb2cb78d\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.526158 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vsnl2\" (UniqueName: \"kubernetes.io/projected/86cb5779-8a22-4602-8bda-e9f2fb2cb78d-kube-api-access-vsnl2\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-5skrj\" (UID: \"86cb5779-8a22-4602-8bda-e9f2fb2cb78d\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.627984 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vsnl2\" (UniqueName: \"kubernetes.io/projected/86cb5779-8a22-4602-8bda-e9f2fb2cb78d-kube-api-access-vsnl2\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-5skrj\" (UID: \"86cb5779-8a22-4602-8bda-e9f2fb2cb78d\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.628101 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/86cb5779-8a22-4602-8bda-e9f2fb2cb78d-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-5skrj\" (UID: \"86cb5779-8a22-4602-8bda-e9f2fb2cb78d\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.628256 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/86cb5779-8a22-4602-8bda-e9f2fb2cb78d-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-5skrj\" (UID: \"86cb5779-8a22-4602-8bda-e9f2fb2cb78d\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.632677 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/86cb5779-8a22-4602-8bda-e9f2fb2cb78d-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-5skrj\" (UID: \"86cb5779-8a22-4602-8bda-e9f2fb2cb78d\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.634195 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/86cb5779-8a22-4602-8bda-e9f2fb2cb78d-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-5skrj\" (UID: \"86cb5779-8a22-4602-8bda-e9f2fb2cb78d\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.651003 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vsnl2\" (UniqueName: \"kubernetes.io/projected/86cb5779-8a22-4602-8bda-e9f2fb2cb78d-kube-api-access-vsnl2\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-5skrj\" (UID: \"86cb5779-8a22-4602-8bda-e9f2fb2cb78d\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj" Nov 28 10:25:57 crc kubenswrapper[4838]: I1128 10:25:57.750993 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj" Nov 28 10:25:58 crc kubenswrapper[4838]: I1128 10:25:58.379213 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj"] Nov 28 10:25:58 crc kubenswrapper[4838]: I1128 10:25:58.384685 4838 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 10:25:59 crc kubenswrapper[4838]: I1128 10:25:59.329003 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj" event={"ID":"86cb5779-8a22-4602-8bda-e9f2fb2cb78d","Type":"ContainerStarted","Data":"2c902d090bc4e211d8794bdd4817846666a7efbafc414323168640c86902b25a"} Nov 28 10:25:59 crc kubenswrapper[4838]: I1128 10:25:59.329509 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj" event={"ID":"86cb5779-8a22-4602-8bda-e9f2fb2cb78d","Type":"ContainerStarted","Data":"05f331ecc9057f8cbbeaf22e5e36e947af139cfe87f97fc06168ba6eb5e3275b"} Nov 28 10:25:59 crc kubenswrapper[4838]: I1128 10:25:59.358324 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj" podStartSLOduration=1.877485537 podStartE2EDuration="2.358275855s" podCreationTimestamp="2025-11-28 10:25:57 +0000 UTC" firstStartedPulling="2025-11-28 10:25:58.384358037 +0000 UTC m=+1730.083332227" lastFinishedPulling="2025-11-28 10:25:58.865148375 +0000 UTC m=+1730.564122545" observedRunningTime="2025-11-28 10:25:59.35303734 +0000 UTC m=+1731.052011530" watchObservedRunningTime="2025-11-28 10:25:59.358275855 +0000 UTC m=+1731.057250035" Nov 28 10:26:04 crc kubenswrapper[4838]: I1128 10:26:04.562555 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab" Nov 28 10:26:04 crc kubenswrapper[4838]: E1128 10:26:04.565547 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:26:05 crc kubenswrapper[4838]: I1128 10:26:05.403548 4838 generic.go:334] "Generic (PLEG): container finished" podID="86cb5779-8a22-4602-8bda-e9f2fb2cb78d" containerID="2c902d090bc4e211d8794bdd4817846666a7efbafc414323168640c86902b25a" exitCode=0 Nov 28 10:26:05 crc kubenswrapper[4838]: I1128 10:26:05.403609 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj" event={"ID":"86cb5779-8a22-4602-8bda-e9f2fb2cb78d","Type":"ContainerDied","Data":"2c902d090bc4e211d8794bdd4817846666a7efbafc414323168640c86902b25a"} Nov 28 10:26:06 crc kubenswrapper[4838]: I1128 10:26:06.917462 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.022094 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/86cb5779-8a22-4602-8bda-e9f2fb2cb78d-inventory\") pod \"86cb5779-8a22-4602-8bda-e9f2fb2cb78d\" (UID: \"86cb5779-8a22-4602-8bda-e9f2fb2cb78d\") " Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.022168 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/86cb5779-8a22-4602-8bda-e9f2fb2cb78d-ssh-key\") pod \"86cb5779-8a22-4602-8bda-e9f2fb2cb78d\" (UID: \"86cb5779-8a22-4602-8bda-e9f2fb2cb78d\") " Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.022236 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vsnl2\" (UniqueName: \"kubernetes.io/projected/86cb5779-8a22-4602-8bda-e9f2fb2cb78d-kube-api-access-vsnl2\") pod \"86cb5779-8a22-4602-8bda-e9f2fb2cb78d\" (UID: \"86cb5779-8a22-4602-8bda-e9f2fb2cb78d\") " Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.027307 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86cb5779-8a22-4602-8bda-e9f2fb2cb78d-kube-api-access-vsnl2" (OuterVolumeSpecName: "kube-api-access-vsnl2") pod "86cb5779-8a22-4602-8bda-e9f2fb2cb78d" (UID: "86cb5779-8a22-4602-8bda-e9f2fb2cb78d"). InnerVolumeSpecName "kube-api-access-vsnl2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.053333 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86cb5779-8a22-4602-8bda-e9f2fb2cb78d-inventory" (OuterVolumeSpecName: "inventory") pod "86cb5779-8a22-4602-8bda-e9f2fb2cb78d" (UID: "86cb5779-8a22-4602-8bda-e9f2fb2cb78d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.057297 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86cb5779-8a22-4602-8bda-e9f2fb2cb78d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "86cb5779-8a22-4602-8bda-e9f2fb2cb78d" (UID: "86cb5779-8a22-4602-8bda-e9f2fb2cb78d"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.124291 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/86cb5779-8a22-4602-8bda-e9f2fb2cb78d-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.124327 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/86cb5779-8a22-4602-8bda-e9f2fb2cb78d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.124342 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vsnl2\" (UniqueName: \"kubernetes.io/projected/86cb5779-8a22-4602-8bda-e9f2fb2cb78d-kube-api-access-vsnl2\") on node \"crc\" DevicePath \"\"" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.433093 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj" event={"ID":"86cb5779-8a22-4602-8bda-e9f2fb2cb78d","Type":"ContainerDied","Data":"05f331ecc9057f8cbbeaf22e5e36e947af139cfe87f97fc06168ba6eb5e3275b"} Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.433147 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="05f331ecc9057f8cbbeaf22e5e36e947af139cfe87f97fc06168ba6eb5e3275b" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.433227 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.507087 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7"] Nov 28 10:26:07 crc kubenswrapper[4838]: E1128 10:26:07.510318 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86cb5779-8a22-4602-8bda-e9f2fb2cb78d" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.510491 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="86cb5779-8a22-4602-8bda-e9f2fb2cb78d" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.510917 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="86cb5779-8a22-4602-8bda-e9f2fb2cb78d" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.511968 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.518016 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.518217 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.518220 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.523946 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.526818 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7"] Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.634850 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8w8v\" (UniqueName: \"kubernetes.io/projected/9e6f900b-6658-4957-a107-a89a5e77fefa-kube-api-access-s8w8v\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-gg2f7\" (UID: \"9e6f900b-6658-4957-a107-a89a5e77fefa\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.635036 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9e6f900b-6658-4957-a107-a89a5e77fefa-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-gg2f7\" (UID: \"9e6f900b-6658-4957-a107-a89a5e77fefa\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.635230 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9e6f900b-6658-4957-a107-a89a5e77fefa-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-gg2f7\" (UID: \"9e6f900b-6658-4957-a107-a89a5e77fefa\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.737291 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9e6f900b-6658-4957-a107-a89a5e77fefa-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-gg2f7\" (UID: \"9e6f900b-6658-4957-a107-a89a5e77fefa\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.737809 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8w8v\" (UniqueName: \"kubernetes.io/projected/9e6f900b-6658-4957-a107-a89a5e77fefa-kube-api-access-s8w8v\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-gg2f7\" (UID: \"9e6f900b-6658-4957-a107-a89a5e77fefa\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.738196 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9e6f900b-6658-4957-a107-a89a5e77fefa-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-gg2f7\" (UID: 
\"9e6f900b-6658-4957-a107-a89a5e77fefa\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.747554 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9e6f900b-6658-4957-a107-a89a5e77fefa-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-gg2f7\" (UID: \"9e6f900b-6658-4957-a107-a89a5e77fefa\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.747975 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9e6f900b-6658-4957-a107-a89a5e77fefa-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-gg2f7\" (UID: \"9e6f900b-6658-4957-a107-a89a5e77fefa\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.755646 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8w8v\" (UniqueName: \"kubernetes.io/projected/9e6f900b-6658-4957-a107-a89a5e77fefa-kube-api-access-s8w8v\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-gg2f7\" (UID: \"9e6f900b-6658-4957-a107-a89a5e77fefa\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7" Nov 28 10:26:07 crc kubenswrapper[4838]: I1128 10:26:07.844993 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7" Nov 28 10:26:08 crc kubenswrapper[4838]: I1128 10:26:08.242158 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7"] Nov 28 10:26:08 crc kubenswrapper[4838]: W1128 10:26:08.243623 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9e6f900b_6658_4957_a107_a89a5e77fefa.slice/crio-da1f61f9894b8dc9d37632e2aaedd813a4fa1e295b5100bd0ce93c19081300d4 WatchSource:0}: Error finding container da1f61f9894b8dc9d37632e2aaedd813a4fa1e295b5100bd0ce93c19081300d4: Status 404 returned error can't find the container with id da1f61f9894b8dc9d37632e2aaedd813a4fa1e295b5100bd0ce93c19081300d4 Nov 28 10:26:08 crc kubenswrapper[4838]: I1128 10:26:08.444330 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7" event={"ID":"9e6f900b-6658-4957-a107-a89a5e77fefa","Type":"ContainerStarted","Data":"da1f61f9894b8dc9d37632e2aaedd813a4fa1e295b5100bd0ce93c19081300d4"} Nov 28 10:26:08 crc kubenswrapper[4838]: I1128 10:26:08.815705 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 10:26:09 crc kubenswrapper[4838]: I1128 10:26:09.454927 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7" event={"ID":"9e6f900b-6658-4957-a107-a89a5e77fefa","Type":"ContainerStarted","Data":"950708e299668559966871d4c19b3b71c412706cae58d0e7f8b312cff60bb2b8"} Nov 28 10:26:09 crc kubenswrapper[4838]: I1128 10:26:09.474639 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7" podStartSLOduration=1.908993602 podStartE2EDuration="2.474617511s" podCreationTimestamp="2025-11-28 10:26:07 +0000 UTC" firstStartedPulling="2025-11-28 10:26:08.246819866 +0000 
UTC m=+1739.945794076" lastFinishedPulling="2025-11-28 10:26:08.812443805 +0000 UTC m=+1740.511417985" observedRunningTime="2025-11-28 10:26:09.46879635 +0000 UTC m=+1741.167770520" watchObservedRunningTime="2025-11-28 10:26:09.474617511 +0000 UTC m=+1741.173591681" Nov 28 10:26:12 crc kubenswrapper[4838]: I1128 10:26:12.034460 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-98gmp"] Nov 28 10:26:12 crc kubenswrapper[4838]: I1128 10:26:12.048650 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-98gmp"] Nov 28 10:26:12 crc kubenswrapper[4838]: I1128 10:26:12.580208 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b46f0d10-b6df-4e90-aae8-b903a29d6898" path="/var/lib/kubelet/pods/b46f0d10-b6df-4e90-aae8-b903a29d6898/volumes" Nov 28 10:26:15 crc kubenswrapper[4838]: I1128 10:26:15.264163 4838 scope.go:117] "RemoveContainer" containerID="ef7f17a99d5227a45089b42792eab52941ae18c70ff92518ecb0418c605f120d" Nov 28 10:26:15 crc kubenswrapper[4838]: I1128 10:26:15.294109 4838 scope.go:117] "RemoveContainer" containerID="4542a47699cd4c04eee312be65e9216e34f3f4a139fde038b0e9a5dc4cae7f1c" Nov 28 10:26:15 crc kubenswrapper[4838]: I1128 10:26:15.376275 4838 scope.go:117] "RemoveContainer" containerID="ac8e37be0b5e03f7348a3aa2a55c18240a23b542e0614f054771d1c6ffd021b8" Nov 28 10:26:15 crc kubenswrapper[4838]: I1128 10:26:15.421744 4838 scope.go:117] "RemoveContainer" containerID="fcee5ec30121bc8f1755560539788ffa4888e2ba8580cfcf6d4dcd0acb0f6b55" Nov 28 10:26:15 crc kubenswrapper[4838]: I1128 10:26:15.461602 4838 scope.go:117] "RemoveContainer" containerID="1eab55f2714789723b2aa474258fd66c62df590e0987ecf2ef5eec85f115aba6" Nov 28 10:26:15 crc kubenswrapper[4838]: I1128 10:26:15.519181 4838 scope.go:117] "RemoveContainer" containerID="f1d4c74768a48ebc6c8fe4c28696b5b71356d149280aa1308c657b6fad5b6201" Nov 28 10:26:15 crc kubenswrapper[4838]: I1128 10:26:15.548472 4838 scope.go:117] "RemoveContainer" containerID="310c9b59a557c1ea060de7a9365d8688f8ad07acd971b29c5645ad6803fc2a14" Nov 28 10:26:18 crc kubenswrapper[4838]: I1128 10:26:18.571584 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab" Nov 28 10:26:18 crc kubenswrapper[4838]: E1128 10:26:18.572371 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:26:24 crc kubenswrapper[4838]: I1128 10:26:24.058476 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-f31a-account-create-update-nk65p"] Nov 28 10:26:24 crc kubenswrapper[4838]: I1128 10:26:24.114850 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-8c24-account-create-update-grgdd"] Nov 28 10:26:24 crc kubenswrapper[4838]: I1128 10:26:24.124874 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-jbjbb"] Nov 28 10:26:24 crc kubenswrapper[4838]: I1128 10:26:24.141255 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-f31a-account-create-update-nk65p"] Nov 28 10:26:24 crc kubenswrapper[4838]: I1128 10:26:24.148914 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/neutron-8c24-account-create-update-grgdd"] Nov 28 10:26:24 crc kubenswrapper[4838]: I1128 10:26:24.157004 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-27vq9"] Nov 28 10:26:24 crc kubenswrapper[4838]: I1128 10:26:24.164412 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-jbjbb"] Nov 28 10:26:24 crc kubenswrapper[4838]: I1128 10:26:24.171603 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-e066-account-create-update-t26s2"] Nov 28 10:26:24 crc kubenswrapper[4838]: I1128 10:26:24.179592 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-27vq9"] Nov 28 10:26:24 crc kubenswrapper[4838]: I1128 10:26:24.187117 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-e066-account-create-update-t26s2"] Nov 28 10:26:24 crc kubenswrapper[4838]: I1128 10:26:24.194596 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-pnr4n"] Nov 28 10:26:24 crc kubenswrapper[4838]: I1128 10:26:24.201744 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-pnr4n"] Nov 28 10:26:24 crc kubenswrapper[4838]: I1128 10:26:24.580243 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35d4d06e-5f03-40aa-9363-17421ac37e64" path="/var/lib/kubelet/pods/35d4d06e-5f03-40aa-9363-17421ac37e64/volumes" Nov 28 10:26:24 crc kubenswrapper[4838]: I1128 10:26:24.581665 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43c88e97-d64f-4155-bd4a-691c588527b2" path="/var/lib/kubelet/pods/43c88e97-d64f-4155-bd4a-691c588527b2/volumes" Nov 28 10:26:24 crc kubenswrapper[4838]: I1128 10:26:24.583461 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44f940a0-f05c-4d6a-b5d3-310bec612088" path="/var/lib/kubelet/pods/44f940a0-f05c-4d6a-b5d3-310bec612088/volumes" Nov 28 10:26:24 crc kubenswrapper[4838]: I1128 10:26:24.584769 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47572f0c-e812-4be7-b4de-2a4a1045553f" path="/var/lib/kubelet/pods/47572f0c-e812-4be7-b4de-2a4a1045553f/volumes" Nov 28 10:26:24 crc kubenswrapper[4838]: I1128 10:26:24.587136 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a69eff2-209f-4644-95ac-d9490f525525" path="/var/lib/kubelet/pods/8a69eff2-209f-4644-95ac-d9490f525525/volumes" Nov 28 10:26:24 crc kubenswrapper[4838]: I1128 10:26:24.588887 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da642c1a-1fea-497a-85af-f966ce5ddaad" path="/var/lib/kubelet/pods/da642c1a-1fea-497a-85af-f966ce5ddaad/volumes" Nov 28 10:26:31 crc kubenswrapper[4838]: I1128 10:26:31.562274 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab" Nov 28 10:26:31 crc kubenswrapper[4838]: E1128 10:26:31.563414 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:26:32 crc kubenswrapper[4838]: I1128 10:26:32.032055 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-z98d5"] Nov 28 
10:26:32 crc kubenswrapper[4838]: I1128 10:26:32.038527 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-z98d5"] Nov 28 10:26:32 crc kubenswrapper[4838]: I1128 10:26:32.576309 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c451dac-becb-4d7e-ae6b-4ee9864113b6" path="/var/lib/kubelet/pods/4c451dac-becb-4d7e-ae6b-4ee9864113b6/volumes" Nov 28 10:26:42 crc kubenswrapper[4838]: I1128 10:26:42.562455 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab" Nov 28 10:26:42 crc kubenswrapper[4838]: E1128 10:26:42.563446 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:26:54 crc kubenswrapper[4838]: I1128 10:26:54.952361 4838 generic.go:334] "Generic (PLEG): container finished" podID="9e6f900b-6658-4957-a107-a89a5e77fefa" containerID="950708e299668559966871d4c19b3b71c412706cae58d0e7f8b312cff60bb2b8" exitCode=0 Nov 28 10:26:54 crc kubenswrapper[4838]: I1128 10:26:54.952497 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7" event={"ID":"9e6f900b-6658-4957-a107-a89a5e77fefa","Type":"ContainerDied","Data":"950708e299668559966871d4c19b3b71c412706cae58d0e7f8b312cff60bb2b8"} Nov 28 10:26:56 crc kubenswrapper[4838]: I1128 10:26:56.494897 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7" Nov 28 10:26:56 crc kubenswrapper[4838]: I1128 10:26:56.561756 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab" Nov 28 10:26:56 crc kubenswrapper[4838]: E1128 10:26:56.562091 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:26:56 crc kubenswrapper[4838]: I1128 10:26:56.635445 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9e6f900b-6658-4957-a107-a89a5e77fefa-inventory\") pod \"9e6f900b-6658-4957-a107-a89a5e77fefa\" (UID: \"9e6f900b-6658-4957-a107-a89a5e77fefa\") " Nov 28 10:26:56 crc kubenswrapper[4838]: I1128 10:26:56.635610 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s8w8v\" (UniqueName: \"kubernetes.io/projected/9e6f900b-6658-4957-a107-a89a5e77fefa-kube-api-access-s8w8v\") pod \"9e6f900b-6658-4957-a107-a89a5e77fefa\" (UID: \"9e6f900b-6658-4957-a107-a89a5e77fefa\") " Nov 28 10:26:56 crc kubenswrapper[4838]: I1128 10:26:56.635872 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9e6f900b-6658-4957-a107-a89a5e77fefa-ssh-key\") pod \"9e6f900b-6658-4957-a107-a89a5e77fefa\" (UID: \"9e6f900b-6658-4957-a107-a89a5e77fefa\") " Nov 28 10:26:56 crc kubenswrapper[4838]: I1128 10:26:56.642388 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e6f900b-6658-4957-a107-a89a5e77fefa-kube-api-access-s8w8v" (OuterVolumeSpecName: "kube-api-access-s8w8v") pod "9e6f900b-6658-4957-a107-a89a5e77fefa" (UID: "9e6f900b-6658-4957-a107-a89a5e77fefa"). InnerVolumeSpecName "kube-api-access-s8w8v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:26:56 crc kubenswrapper[4838]: I1128 10:26:56.664764 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e6f900b-6658-4957-a107-a89a5e77fefa-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9e6f900b-6658-4957-a107-a89a5e77fefa" (UID: "9e6f900b-6658-4957-a107-a89a5e77fefa"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:26:56 crc kubenswrapper[4838]: I1128 10:26:56.681300 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e6f900b-6658-4957-a107-a89a5e77fefa-inventory" (OuterVolumeSpecName: "inventory") pod "9e6f900b-6658-4957-a107-a89a5e77fefa" (UID: "9e6f900b-6658-4957-a107-a89a5e77fefa"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:26:56 crc kubenswrapper[4838]: I1128 10:26:56.738348 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9e6f900b-6658-4957-a107-a89a5e77fefa-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:26:56 crc kubenswrapper[4838]: I1128 10:26:56.738380 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9e6f900b-6658-4957-a107-a89a5e77fefa-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 10:26:56 crc kubenswrapper[4838]: I1128 10:26:56.738390 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s8w8v\" (UniqueName: \"kubernetes.io/projected/9e6f900b-6658-4957-a107-a89a5e77fefa-kube-api-access-s8w8v\") on node \"crc\" DevicePath \"\"" Nov 28 10:26:56 crc kubenswrapper[4838]: I1128 10:26:56.993583 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7" event={"ID":"9e6f900b-6658-4957-a107-a89a5e77fefa","Type":"ContainerDied","Data":"da1f61f9894b8dc9d37632e2aaedd813a4fa1e295b5100bd0ce93c19081300d4"} Nov 28 10:26:56 crc kubenswrapper[4838]: I1128 10:26:56.993680 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da1f61f9894b8dc9d37632e2aaedd813a4fa1e295b5100bd0ce93c19081300d4" Nov 28 10:26:56 crc kubenswrapper[4838]: I1128 10:26:56.994053 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7" Nov 28 10:26:57 crc kubenswrapper[4838]: I1128 10:26:57.084532 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj"] Nov 28 10:26:57 crc kubenswrapper[4838]: E1128 10:26:57.085150 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e6f900b-6658-4957-a107-a89a5e77fefa" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 28 10:26:57 crc kubenswrapper[4838]: I1128 10:26:57.085179 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e6f900b-6658-4957-a107-a89a5e77fefa" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 28 10:26:57 crc kubenswrapper[4838]: I1128 10:26:57.085454 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e6f900b-6658-4957-a107-a89a5e77fefa" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 28 10:26:57 crc kubenswrapper[4838]: I1128 10:26:57.086305 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj" Nov 28 10:26:57 crc kubenswrapper[4838]: I1128 10:26:57.091004 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 10:26:57 crc kubenswrapper[4838]: I1128 10:26:57.091318 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 10:26:57 crc kubenswrapper[4838]: I1128 10:26:57.091472 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 10:26:57 crc kubenswrapper[4838]: I1128 10:26:57.091598 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn" Nov 28 10:26:57 crc kubenswrapper[4838]: I1128 10:26:57.097568 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj"] Nov 28 10:26:57 crc kubenswrapper[4838]: I1128 10:26:57.250888 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64d0c9f2-be2c-41e0-b740-25b053504b1b-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj\" (UID: \"64d0c9f2-be2c-41e0-b740-25b053504b1b\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj" Nov 28 10:26:57 crc kubenswrapper[4838]: I1128 10:26:57.251074 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/64d0c9f2-be2c-41e0-b740-25b053504b1b-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj\" (UID: \"64d0c9f2-be2c-41e0-b740-25b053504b1b\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj" Nov 28 10:26:57 crc kubenswrapper[4838]: I1128 10:26:57.251129 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rb9xr\" (UniqueName: \"kubernetes.io/projected/64d0c9f2-be2c-41e0-b740-25b053504b1b-kube-api-access-rb9xr\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj\" (UID: \"64d0c9f2-be2c-41e0-b740-25b053504b1b\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj" Nov 28 10:26:57 crc kubenswrapper[4838]: I1128 10:26:57.353407 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64d0c9f2-be2c-41e0-b740-25b053504b1b-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj\" (UID: \"64d0c9f2-be2c-41e0-b740-25b053504b1b\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj" Nov 28 10:26:57 crc kubenswrapper[4838]: I1128 10:26:57.354549 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/64d0c9f2-be2c-41e0-b740-25b053504b1b-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj\" (UID: \"64d0c9f2-be2c-41e0-b740-25b053504b1b\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj" Nov 28 10:26:57 crc kubenswrapper[4838]: I1128 10:26:57.354848 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rb9xr\" (UniqueName: \"kubernetes.io/projected/64d0c9f2-be2c-41e0-b740-25b053504b1b-kube-api-access-rb9xr\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj\" 
(UID: \"64d0c9f2-be2c-41e0-b740-25b053504b1b\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj" Nov 28 10:26:57 crc kubenswrapper[4838]: I1128 10:26:57.359646 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64d0c9f2-be2c-41e0-b740-25b053504b1b-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj\" (UID: \"64d0c9f2-be2c-41e0-b740-25b053504b1b\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj" Nov 28 10:26:57 crc kubenswrapper[4838]: I1128 10:26:57.360327 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/64d0c9f2-be2c-41e0-b740-25b053504b1b-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj\" (UID: \"64d0c9f2-be2c-41e0-b740-25b053504b1b\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj" Nov 28 10:26:57 crc kubenswrapper[4838]: I1128 10:26:57.386093 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rb9xr\" (UniqueName: \"kubernetes.io/projected/64d0c9f2-be2c-41e0-b740-25b053504b1b-kube-api-access-rb9xr\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj\" (UID: \"64d0c9f2-be2c-41e0-b740-25b053504b1b\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj" Nov 28 10:26:57 crc kubenswrapper[4838]: I1128 10:26:57.408861 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj" Nov 28 10:26:58 crc kubenswrapper[4838]: I1128 10:26:58.071835 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj"] Nov 28 10:26:59 crc kubenswrapper[4838]: I1128 10:26:59.012652 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj" event={"ID":"64d0c9f2-be2c-41e0-b740-25b053504b1b","Type":"ContainerStarted","Data":"5249709a56fbb7f50ade42d23f25807cc5e7f09704e007521228725b0a4ed4a7"} Nov 28 10:27:00 crc kubenswrapper[4838]: I1128 10:27:00.024868 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj" event={"ID":"64d0c9f2-be2c-41e0-b740-25b053504b1b","Type":"ContainerStarted","Data":"a0dcb66973648868beeca41e5062a4dd24f77347bb64641cbf7e023c1ce65737"} Nov 28 10:27:00 crc kubenswrapper[4838]: I1128 10:27:00.063617 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj" podStartSLOduration=2.262180805 podStartE2EDuration="3.063589421s" podCreationTimestamp="2025-11-28 10:26:57 +0000 UTC" firstStartedPulling="2025-11-28 10:26:58.065348528 +0000 UTC m=+1789.764322708" lastFinishedPulling="2025-11-28 10:26:58.866757114 +0000 UTC m=+1790.565731324" observedRunningTime="2025-11-28 10:27:00.046889558 +0000 UTC m=+1791.745863758" watchObservedRunningTime="2025-11-28 10:27:00.063589421 +0000 UTC m=+1791.762563631" Nov 28 10:27:04 crc kubenswrapper[4838]: I1128 10:27:04.063673 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-jg6k6"] Nov 28 10:27:04 crc kubenswrapper[4838]: I1128 10:27:04.066994 4838 generic.go:334] "Generic (PLEG): container finished" podID="64d0c9f2-be2c-41e0-b740-25b053504b1b" containerID="a0dcb66973648868beeca41e5062a4dd24f77347bb64641cbf7e023c1ce65737" exitCode=0 
Nov 28 10:27:04 crc kubenswrapper[4838]: I1128 10:27:04.067034 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj" event={"ID":"64d0c9f2-be2c-41e0-b740-25b053504b1b","Type":"ContainerDied","Data":"a0dcb66973648868beeca41e5062a4dd24f77347bb64641cbf7e023c1ce65737"} Nov 28 10:27:04 crc kubenswrapper[4838]: I1128 10:27:04.075304 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-s4dkv"] Nov 28 10:27:04 crc kubenswrapper[4838]: I1128 10:27:04.087054 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-8ttzd"] Nov 28 10:27:04 crc kubenswrapper[4838]: I1128 10:27:04.097560 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-xhqld"] Nov 28 10:27:04 crc kubenswrapper[4838]: I1128 10:27:04.104454 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-s4dkv"] Nov 28 10:27:04 crc kubenswrapper[4838]: I1128 10:27:04.112198 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-xhqld"] Nov 28 10:27:04 crc kubenswrapper[4838]: I1128 10:27:04.118041 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-jg6k6"] Nov 28 10:27:04 crc kubenswrapper[4838]: I1128 10:27:04.123763 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-8ttzd"] Nov 28 10:27:04 crc kubenswrapper[4838]: I1128 10:27:04.577887 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f0e700a-b642-4062-94fd-dec398ba7a22" path="/var/lib/kubelet/pods/1f0e700a-b642-4062-94fd-dec398ba7a22/volumes" Nov 28 10:27:04 crc kubenswrapper[4838]: I1128 10:27:04.578706 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="435894c3-ccee-4f57-8afa-d225888db755" path="/var/lib/kubelet/pods/435894c3-ccee-4f57-8afa-d225888db755/volumes" Nov 28 10:27:04 crc kubenswrapper[4838]: I1128 10:27:04.579578 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57482c21-bcdb-4a48-93ed-41ddca82a9fb" path="/var/lib/kubelet/pods/57482c21-bcdb-4a48-93ed-41ddca82a9fb/volumes" Nov 28 10:27:04 crc kubenswrapper[4838]: I1128 10:27:04.581142 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="acd4b461-b9cd-4f4b-89e9-8f4e46112938" path="/var/lib/kubelet/pods/acd4b461-b9cd-4f4b-89e9-8f4e46112938/volumes" Nov 28 10:27:05 crc kubenswrapper[4838]: I1128 10:27:05.520850 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj" Nov 28 10:27:05 crc kubenswrapper[4838]: I1128 10:27:05.716083 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/64d0c9f2-be2c-41e0-b740-25b053504b1b-ssh-key\") pod \"64d0c9f2-be2c-41e0-b740-25b053504b1b\" (UID: \"64d0c9f2-be2c-41e0-b740-25b053504b1b\") " Nov 28 10:27:05 crc kubenswrapper[4838]: I1128 10:27:05.716491 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64d0c9f2-be2c-41e0-b740-25b053504b1b-inventory\") pod \"64d0c9f2-be2c-41e0-b740-25b053504b1b\" (UID: \"64d0c9f2-be2c-41e0-b740-25b053504b1b\") " Nov 28 10:27:05 crc kubenswrapper[4838]: I1128 10:27:05.716775 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rb9xr\" (UniqueName: \"kubernetes.io/projected/64d0c9f2-be2c-41e0-b740-25b053504b1b-kube-api-access-rb9xr\") pod \"64d0c9f2-be2c-41e0-b740-25b053504b1b\" (UID: \"64d0c9f2-be2c-41e0-b740-25b053504b1b\") " Nov 28 10:27:05 crc kubenswrapper[4838]: I1128 10:27:05.728991 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64d0c9f2-be2c-41e0-b740-25b053504b1b-kube-api-access-rb9xr" (OuterVolumeSpecName: "kube-api-access-rb9xr") pod "64d0c9f2-be2c-41e0-b740-25b053504b1b" (UID: "64d0c9f2-be2c-41e0-b740-25b053504b1b"). InnerVolumeSpecName "kube-api-access-rb9xr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:27:05 crc kubenswrapper[4838]: I1128 10:27:05.743305 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64d0c9f2-be2c-41e0-b740-25b053504b1b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "64d0c9f2-be2c-41e0-b740-25b053504b1b" (UID: "64d0c9f2-be2c-41e0-b740-25b053504b1b"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:27:05 crc kubenswrapper[4838]: I1128 10:27:05.767890 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64d0c9f2-be2c-41e0-b740-25b053504b1b-inventory" (OuterVolumeSpecName: "inventory") pod "64d0c9f2-be2c-41e0-b740-25b053504b1b" (UID: "64d0c9f2-be2c-41e0-b740-25b053504b1b"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:27:05 crc kubenswrapper[4838]: I1128 10:27:05.818765 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rb9xr\" (UniqueName: \"kubernetes.io/projected/64d0c9f2-be2c-41e0-b740-25b053504b1b-kube-api-access-rb9xr\") on node \"crc\" DevicePath \"\"" Nov 28 10:27:05 crc kubenswrapper[4838]: I1128 10:27:05.818790 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/64d0c9f2-be2c-41e0-b740-25b053504b1b-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:27:05 crc kubenswrapper[4838]: I1128 10:27:05.818816 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64d0c9f2-be2c-41e0-b740-25b053504b1b-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.089049 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj" event={"ID":"64d0c9f2-be2c-41e0-b740-25b053504b1b","Type":"ContainerDied","Data":"5249709a56fbb7f50ade42d23f25807cc5e7f09704e007521228725b0a4ed4a7"} Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.089102 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj" Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.089108 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5249709a56fbb7f50ade42d23f25807cc5e7f09704e007521228725b0a4ed4a7" Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.161478 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n"] Nov 28 10:27:06 crc kubenswrapper[4838]: E1128 10:27:06.162262 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64d0c9f2-be2c-41e0-b740-25b053504b1b" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.162301 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="64d0c9f2-be2c-41e0-b740-25b053504b1b" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.162871 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="64d0c9f2-be2c-41e0-b740-25b053504b1b" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.164278 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n"
Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.166216 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn"
Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.166786 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.169390 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.169405 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.180432 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n"]
Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.326874 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06110a37-1e85-48b3-9e24-aad19c7b062e-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n\" (UID: \"06110a37-1e85-48b3-9e24-aad19c7b062e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n"
Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.326941 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06110a37-1e85-48b3-9e24-aad19c7b062e-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n\" (UID: \"06110a37-1e85-48b3-9e24-aad19c7b062e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n"
Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.327880 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vw46\" (UniqueName: \"kubernetes.io/projected/06110a37-1e85-48b3-9e24-aad19c7b062e-kube-api-access-8vw46\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n\" (UID: \"06110a37-1e85-48b3-9e24-aad19c7b062e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n"
Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.430026 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06110a37-1e85-48b3-9e24-aad19c7b062e-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n\" (UID: \"06110a37-1e85-48b3-9e24-aad19c7b062e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n"
Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.430271 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vw46\" (UniqueName: \"kubernetes.io/projected/06110a37-1e85-48b3-9e24-aad19c7b062e-kube-api-access-8vw46\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n\" (UID: \"06110a37-1e85-48b3-9e24-aad19c7b062e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n"
Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.430836 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06110a37-1e85-48b3-9e24-aad19c7b062e-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n\" (UID: \"06110a37-1e85-48b3-9e24-aad19c7b062e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n"
Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.434206 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06110a37-1e85-48b3-9e24-aad19c7b062e-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n\" (UID: \"06110a37-1e85-48b3-9e24-aad19c7b062e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n"
Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.435651 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06110a37-1e85-48b3-9e24-aad19c7b062e-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n\" (UID: \"06110a37-1e85-48b3-9e24-aad19c7b062e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n"
Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.452099 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vw46\" (UniqueName: \"kubernetes.io/projected/06110a37-1e85-48b3-9e24-aad19c7b062e-kube-api-access-8vw46\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n\" (UID: \"06110a37-1e85-48b3-9e24-aad19c7b062e\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n"
Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.488452 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n"
Nov 28 10:27:06 crc kubenswrapper[4838]: I1128 10:27:06.861261 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n"]
Nov 28 10:27:07 crc kubenswrapper[4838]: I1128 10:27:07.099899 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n" event={"ID":"06110a37-1e85-48b3-9e24-aad19c7b062e","Type":"ContainerStarted","Data":"8ad3641c6f79e3af1fa4a6c25d8b826d54fe360932913b70642472e60cda6c2a"}
Nov 28 10:27:08 crc kubenswrapper[4838]: I1128 10:27:08.113670 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n" event={"ID":"06110a37-1e85-48b3-9e24-aad19c7b062e","Type":"ContainerStarted","Data":"edc61b47789223a89855bbc90c1306cf10270325b7fe22400441260338ae823a"}
Nov 28 10:27:08 crc kubenswrapper[4838]: I1128 10:27:08.137882 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n" podStartSLOduration=1.639936284 podStartE2EDuration="2.137855957s" podCreationTimestamp="2025-11-28 10:27:06 +0000 UTC" firstStartedPulling="2025-11-28 10:27:06.868302793 +0000 UTC m=+1798.567276973" lastFinishedPulling="2025-11-28 10:27:07.366222436 +0000 UTC m=+1799.065196646" observedRunningTime="2025-11-28 10:27:08.131962403 +0000 UTC m=+1799.830936613" watchObservedRunningTime="2025-11-28 10:27:08.137855957 +0000 UTC m=+1799.836830137"
Nov 28 10:27:11 crc kubenswrapper[4838]: I1128 10:27:11.563395 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab"
Nov 28 10:27:11 crc kubenswrapper[4838]: E1128 10:27:11.565034 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:27:15 crc kubenswrapper[4838]: I1128 10:27:15.731423 4838 scope.go:117] "RemoveContainer" containerID="1723ce3d5dc802ce733bacd1dce7082bccaa22dfdcb736551d99aa7778a25c4e"
Nov 28 10:27:15 crc kubenswrapper[4838]: I1128 10:27:15.764038 4838 scope.go:117] "RemoveContainer" containerID="6bc60708c6d9af83c454a275a74a4e0638bf9b4d6ea4add004e86546538258cb"
Nov 28 10:27:15 crc kubenswrapper[4838]: I1128 10:27:15.838782 4838 scope.go:117] "RemoveContainer" containerID="fca4003b91f3c167ba46164180040e73f3865609d4bc8a2ca58435b309b5f535"
Nov 28 10:27:15 crc kubenswrapper[4838]: I1128 10:27:15.880514 4838 scope.go:117] "RemoveContainer" containerID="5c2d62fce023cec20e9f4c9a6dee170efd85240c407b7988a7e57868731435b2"
Nov 28 10:27:15 crc kubenswrapper[4838]: I1128 10:27:15.934563 4838 scope.go:117] "RemoveContainer" containerID="1e791633c3e1b899c75ddc4230587da1b7e592a0064d665ce153f947754e9429"
Nov 28 10:27:15 crc kubenswrapper[4838]: I1128 10:27:15.972454 4838 scope.go:117] "RemoveContainer" containerID="ea7a51e6574a25bb9640eadf80f21965c39cbc6410bec5c8c57593439ee47c45"
Nov 28 10:27:16 crc kubenswrapper[4838]: I1128 10:27:16.029988 4838 scope.go:117] "RemoveContainer" containerID="98b9dd4ba1006b6494657179047d568cd92be833abeccc5dd03f5ecbd3ccdc02"
Nov 28 10:27:16 crc kubenswrapper[4838]: I1128 10:27:16.071733 4838 scope.go:117] "RemoveContainer" containerID="d67a5083bff388d76a707e1d845b786425d0ef8b1e67f6174ffb4130fc1239e8"
Nov 28 10:27:16 crc kubenswrapper[4838]: I1128 10:27:16.094588 4838 scope.go:117] "RemoveContainer" containerID="92361b6868108e2b152342a4687d4a9e4a553b690331375fdf73138aa8897d11"
Nov 28 10:27:16 crc kubenswrapper[4838]: I1128 10:27:16.116592 4838 scope.go:117] "RemoveContainer" containerID="2e70d115c83ef7518fb3d01d81b48bb4b8aa260315d3a261933d60e58503faed"
Nov 28 10:27:16 crc kubenswrapper[4838]: I1128 10:27:16.150554 4838 scope.go:117] "RemoveContainer" containerID="7d8a363cab24054d2ba2b9141f07207bcfd30a07ac3f6023ba2cd123e67272da"
Nov 28 10:27:22 crc kubenswrapper[4838]: I1128 10:27:22.057670 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-qdl6f"]
Nov 28 10:27:22 crc kubenswrapper[4838]: I1128 10:27:22.071644 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-qdl6f"]
Nov 28 10:27:22 crc kubenswrapper[4838]: I1128 10:27:22.571067 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4f1cd16-7995-4964-87d8-ab904bc11ca5" path="/var/lib/kubelet/pods/d4f1cd16-7995-4964-87d8-ab904bc11ca5/volumes"
Nov 28 10:27:25 crc kubenswrapper[4838]: I1128 10:27:25.561919 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab"
Nov 28 10:27:25 crc kubenswrapper[4838]: E1128 10:27:25.562750 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:27:40 crc kubenswrapper[4838]: I1128 10:27:40.562887 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab"
Nov 28 10:27:40 crc kubenswrapper[4838]: E1128 10:27:40.563769 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:27:53 crc kubenswrapper[4838]: I1128 10:27:53.064605 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-5rgrd"]
Nov 28 10:27:53 crc kubenswrapper[4838]: I1128 10:27:53.072533 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-h4wpj"]
Nov 28 10:27:53 crc kubenswrapper[4838]: I1128 10:27:53.080443 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-h4wpj"]
Nov 28 10:27:53 crc kubenswrapper[4838]: I1128 10:27:53.088059 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-5rgrd"]
Nov 28 10:27:54 crc kubenswrapper[4838]: I1128 10:27:54.032257 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-cfc5-account-create-update-cfjq4"]
Nov 28 10:27:54 crc kubenswrapper[4838]: I1128 10:27:54.041767 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-cfc5-account-create-update-cfjq4"]
Nov 28 10:27:54 crc kubenswrapper[4838]: I1128 10:27:54.051536 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-d90b-account-create-update-lklt7"]
Nov 28 10:27:54 crc kubenswrapper[4838]: I1128 10:27:54.062259 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-1abe-account-create-update-74trh"]
Nov 28 10:27:54 crc kubenswrapper[4838]: I1128 10:27:54.071457 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-r7rf2"]
Nov 28 10:27:54 crc kubenswrapper[4838]: I1128 10:27:54.102222 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-d90b-account-create-update-lklt7"]
Nov 28 10:27:54 crc kubenswrapper[4838]: I1128 10:27:54.112374 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-r7rf2"]
Nov 28 10:27:54 crc kubenswrapper[4838]: I1128 10:27:54.121217 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-1abe-account-create-update-74trh"]
Nov 28 10:27:54 crc kubenswrapper[4838]: I1128 10:27:54.573691 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a796e35-158d-43c9-a806-92dda81c78f2" path="/var/lib/kubelet/pods/0a796e35-158d-43c9-a806-92dda81c78f2/volumes"
Nov 28 10:27:54 crc kubenswrapper[4838]: I1128 10:27:54.575054 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c180cf9-7d38-4e43-9723-1fa20242ff56" path="/var/lib/kubelet/pods/2c180cf9-7d38-4e43-9723-1fa20242ff56/volumes"
Nov 28 10:27:54 crc kubenswrapper[4838]: I1128 10:27:54.576557 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d048538-8370-4422-89b8-1f4733ae72b1" path="/var/lib/kubelet/pods/2d048538-8370-4422-89b8-1f4733ae72b1/volumes"
Nov 28 10:27:54 crc kubenswrapper[4838]: I1128 10:27:54.577967 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3af8b444-f619-4a2c-bfcc-1dbb7966eb62" path="/var/lib/kubelet/pods/3af8b444-f619-4a2c-bfcc-1dbb7966eb62/volumes"
Nov 28 10:27:54 crc kubenswrapper[4838]: I1128 10:27:54.580228 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f84685f-a9f2-45b4-af58-fb0218060369" path="/var/lib/kubelet/pods/6f84685f-a9f2-45b4-af58-fb0218060369/volumes"
Nov 28 10:27:54 crc kubenswrapper[4838]: I1128 10:27:54.581481 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed9862ca-36d5-4ece-9aa7-4ed71b713e15" path="/var/lib/kubelet/pods/ed9862ca-36d5-4ece-9aa7-4ed71b713e15/volumes"
Nov 28 10:27:55 crc kubenswrapper[4838]: I1128 10:27:55.562951 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab"
Nov 28 10:27:55 crc kubenswrapper[4838]: E1128 10:27:55.563487 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:28:07 crc kubenswrapper[4838]: I1128 10:28:07.561978 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab"
Nov 28 10:28:07 crc kubenswrapper[4838]: E1128 10:28:07.563363 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:28:08 crc kubenswrapper[4838]: I1128 10:28:08.817663 4838 generic.go:334] "Generic (PLEG): container finished" podID="06110a37-1e85-48b3-9e24-aad19c7b062e" containerID="edc61b47789223a89855bbc90c1306cf10270325b7fe22400441260338ae823a" exitCode=0
Nov 28 10:28:08 crc kubenswrapper[4838]: I1128 10:28:08.817861 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n" event={"ID":"06110a37-1e85-48b3-9e24-aad19c7b062e","Type":"ContainerDied","Data":"edc61b47789223a89855bbc90c1306cf10270325b7fe22400441260338ae823a"}
Nov 28 10:28:10 crc kubenswrapper[4838]: I1128 10:28:10.318532 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n"
Nov 28 10:28:10 crc kubenswrapper[4838]: I1128 10:28:10.417471 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06110a37-1e85-48b3-9e24-aad19c7b062e-ssh-key\") pod \"06110a37-1e85-48b3-9e24-aad19c7b062e\" (UID: \"06110a37-1e85-48b3-9e24-aad19c7b062e\") "
Nov 28 10:28:10 crc kubenswrapper[4838]: I1128 10:28:10.417537 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06110a37-1e85-48b3-9e24-aad19c7b062e-inventory\") pod \"06110a37-1e85-48b3-9e24-aad19c7b062e\" (UID: \"06110a37-1e85-48b3-9e24-aad19c7b062e\") "
Nov 28 10:28:10 crc kubenswrapper[4838]: I1128 10:28:10.417571 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vw46\" (UniqueName: \"kubernetes.io/projected/06110a37-1e85-48b3-9e24-aad19c7b062e-kube-api-access-8vw46\") pod \"06110a37-1e85-48b3-9e24-aad19c7b062e\" (UID: \"06110a37-1e85-48b3-9e24-aad19c7b062e\") "
Nov 28 10:28:10 crc kubenswrapper[4838]: I1128 10:28:10.424974 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06110a37-1e85-48b3-9e24-aad19c7b062e-kube-api-access-8vw46" (OuterVolumeSpecName: "kube-api-access-8vw46") pod "06110a37-1e85-48b3-9e24-aad19c7b062e" (UID: "06110a37-1e85-48b3-9e24-aad19c7b062e"). InnerVolumeSpecName "kube-api-access-8vw46". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:28:10 crc kubenswrapper[4838]: I1128 10:28:10.452420 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06110a37-1e85-48b3-9e24-aad19c7b062e-inventory" (OuterVolumeSpecName: "inventory") pod "06110a37-1e85-48b3-9e24-aad19c7b062e" (UID: "06110a37-1e85-48b3-9e24-aad19c7b062e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:28:10 crc kubenswrapper[4838]: I1128 10:28:10.463553 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06110a37-1e85-48b3-9e24-aad19c7b062e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "06110a37-1e85-48b3-9e24-aad19c7b062e" (UID: "06110a37-1e85-48b3-9e24-aad19c7b062e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:28:10 crc kubenswrapper[4838]: I1128 10:28:10.520494 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06110a37-1e85-48b3-9e24-aad19c7b062e-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 28 10:28:10 crc kubenswrapper[4838]: I1128 10:28:10.520550 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06110a37-1e85-48b3-9e24-aad19c7b062e-inventory\") on node \"crc\" DevicePath \"\""
Nov 28 10:28:10 crc kubenswrapper[4838]: I1128 10:28:10.520574 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8vw46\" (UniqueName: \"kubernetes.io/projected/06110a37-1e85-48b3-9e24-aad19c7b062e-kube-api-access-8vw46\") on node \"crc\" DevicePath \"\""
Nov 28 10:28:10 crc kubenswrapper[4838]: I1128 10:28:10.844989 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n" event={"ID":"06110a37-1e85-48b3-9e24-aad19c7b062e","Type":"ContainerDied","Data":"8ad3641c6f79e3af1fa4a6c25d8b826d54fe360932913b70642472e60cda6c2a"}
Nov 28 10:28:10 crc kubenswrapper[4838]: I1128 10:28:10.845457 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ad3641c6f79e3af1fa4a6c25d8b826d54fe360932913b70642472e60cda6c2a"
Nov 28 10:28:10 crc kubenswrapper[4838]: I1128 10:28:10.845116 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n"
Nov 28 10:28:10 crc kubenswrapper[4838]: I1128 10:28:10.955094 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-8476r"]
Nov 28 10:28:10 crc kubenswrapper[4838]: E1128 10:28:10.956016 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06110a37-1e85-48b3-9e24-aad19c7b062e" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Nov 28 10:28:10 crc kubenswrapper[4838]: I1128 10:28:10.956049 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="06110a37-1e85-48b3-9e24-aad19c7b062e" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Nov 28 10:28:10 crc kubenswrapper[4838]: I1128 10:28:10.956438 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="06110a37-1e85-48b3-9e24-aad19c7b062e" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Nov 28 10:28:10 crc kubenswrapper[4838]: I1128 10:28:10.957442 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-8476r"
Nov 28 10:28:10 crc kubenswrapper[4838]: I1128 10:28:10.960167 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 28 10:28:10 crc kubenswrapper[4838]: I1128 10:28:10.960936 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 28 10:28:10 crc kubenswrapper[4838]: I1128 10:28:10.961330 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 28 10:28:10 crc kubenswrapper[4838]: I1128 10:28:10.962237 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-8476r"]
Nov 28 10:28:10 crc kubenswrapper[4838]: I1128 10:28:10.969844 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn"
Nov 28 10:28:11 crc kubenswrapper[4838]: I1128 10:28:11.130810 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/9b2a3bd0-add6-473d-b3fe-3f0e1f211230-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-8476r\" (UID: \"9b2a3bd0-add6-473d-b3fe-3f0e1f211230\") " pod="openstack/ssh-known-hosts-edpm-deployment-8476r"
Nov 28 10:28:11 crc kubenswrapper[4838]: I1128 10:28:11.130873 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9b2a3bd0-add6-473d-b3fe-3f0e1f211230-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-8476r\" (UID: \"9b2a3bd0-add6-473d-b3fe-3f0e1f211230\") " pod="openstack/ssh-known-hosts-edpm-deployment-8476r"
Nov 28 10:28:11 crc kubenswrapper[4838]: I1128 10:28:11.131016 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flmqc\" (UniqueName: \"kubernetes.io/projected/9b2a3bd0-add6-473d-b3fe-3f0e1f211230-kube-api-access-flmqc\") pod \"ssh-known-hosts-edpm-deployment-8476r\" (UID: \"9b2a3bd0-add6-473d-b3fe-3f0e1f211230\") " pod="openstack/ssh-known-hosts-edpm-deployment-8476r"
Nov 28 10:28:11 crc kubenswrapper[4838]: I1128 10:28:11.232393 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flmqc\" (UniqueName: \"kubernetes.io/projected/9b2a3bd0-add6-473d-b3fe-3f0e1f211230-kube-api-access-flmqc\") pod \"ssh-known-hosts-edpm-deployment-8476r\" (UID: \"9b2a3bd0-add6-473d-b3fe-3f0e1f211230\") " pod="openstack/ssh-known-hosts-edpm-deployment-8476r"
Nov 28 10:28:11 crc kubenswrapper[4838]: I1128 10:28:11.232508 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/9b2a3bd0-add6-473d-b3fe-3f0e1f211230-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-8476r\" (UID: \"9b2a3bd0-add6-473d-b3fe-3f0e1f211230\") " pod="openstack/ssh-known-hosts-edpm-deployment-8476r"
Nov 28 10:28:11 crc kubenswrapper[4838]: I1128 10:28:11.232539 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9b2a3bd0-add6-473d-b3fe-3f0e1f211230-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-8476r\" (UID: \"9b2a3bd0-add6-473d-b3fe-3f0e1f211230\") " pod="openstack/ssh-known-hosts-edpm-deployment-8476r"
Nov 28 10:28:11 crc kubenswrapper[4838]: I1128 10:28:11.240615 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/9b2a3bd0-add6-473d-b3fe-3f0e1f211230-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-8476r\" (UID: \"9b2a3bd0-add6-473d-b3fe-3f0e1f211230\") " pod="openstack/ssh-known-hosts-edpm-deployment-8476r"
Nov 28 10:28:11 crc kubenswrapper[4838]: I1128 10:28:11.243818 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9b2a3bd0-add6-473d-b3fe-3f0e1f211230-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-8476r\" (UID: \"9b2a3bd0-add6-473d-b3fe-3f0e1f211230\") " pod="openstack/ssh-known-hosts-edpm-deployment-8476r"
Nov 28 10:28:11 crc kubenswrapper[4838]: I1128 10:28:11.265235 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flmqc\" (UniqueName: \"kubernetes.io/projected/9b2a3bd0-add6-473d-b3fe-3f0e1f211230-kube-api-access-flmqc\") pod \"ssh-known-hosts-edpm-deployment-8476r\" (UID: \"9b2a3bd0-add6-473d-b3fe-3f0e1f211230\") " pod="openstack/ssh-known-hosts-edpm-deployment-8476r"
Nov 28 10:28:11 crc kubenswrapper[4838]: I1128 10:28:11.285955 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-8476r"
Nov 28 10:28:11 crc kubenswrapper[4838]: I1128 10:28:11.906604 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-8476r"]
Nov 28 10:28:11 crc kubenswrapper[4838]: W1128 10:28:11.912203 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9b2a3bd0_add6_473d_b3fe_3f0e1f211230.slice/crio-202c7b8e139974df4683a042d805fcd57c2de461be195af2fc7ca1bcab49fb72 WatchSource:0}: Error finding container 202c7b8e139974df4683a042d805fcd57c2de461be195af2fc7ca1bcab49fb72: Status 404 returned error can't find the container with id 202c7b8e139974df4683a042d805fcd57c2de461be195af2fc7ca1bcab49fb72
Nov 28 10:28:12 crc kubenswrapper[4838]: I1128 10:28:12.868785 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-8476r" event={"ID":"9b2a3bd0-add6-473d-b3fe-3f0e1f211230","Type":"ContainerStarted","Data":"202c7b8e139974df4683a042d805fcd57c2de461be195af2fc7ca1bcab49fb72"}
Nov 28 10:28:13 crc kubenswrapper[4838]: I1128 10:28:13.883788 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-8476r" event={"ID":"9b2a3bd0-add6-473d-b3fe-3f0e1f211230","Type":"ContainerStarted","Data":"d323e0c824079bddcc7b03bd7d94d47d1c10284ddf75ea050f3d02d6544b7146"}
Nov 28 10:28:13 crc kubenswrapper[4838]: I1128 10:28:13.911419 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-8476r" podStartSLOduration=3.162596582 podStartE2EDuration="3.911314847s" podCreationTimestamp="2025-11-28 10:28:10 +0000 UTC" firstStartedPulling="2025-11-28 10:28:11.91615562 +0000 UTC m=+1863.615129800" lastFinishedPulling="2025-11-28 10:28:12.664873885 +0000 UTC m=+1864.363848065" observedRunningTime="2025-11-28 10:28:13.909022644 +0000 UTC m=+1865.607996894" watchObservedRunningTime="2025-11-28 10:28:13.911314847 +0000 UTC m=+1865.610289057"
Nov 28 10:28:16 crc kubenswrapper[4838]: I1128 10:28:16.426797 4838 scope.go:117] "RemoveContainer" containerID="e267b5e3e37639284b8d19984fefe7e01c4f917c413b0bad084bdfb7db50b0a2"
Nov 28 10:28:16 crc kubenswrapper[4838]: I1128 10:28:16.463487 4838 scope.go:117] "RemoveContainer" containerID="e4931c9a00687c16d0a181006a54c48ef31e57bde057b26cf3de5d3c31fe1f74"
Nov 28 10:28:16 crc kubenswrapper[4838]: I1128 10:28:16.553381 4838 scope.go:117] "RemoveContainer" containerID="4f0a73c500f34a37642c1fabed5297418f5aee5797a32290e0851d0c8cf90373"
Nov 28 10:28:16 crc kubenswrapper[4838]: I1128 10:28:16.595575 4838 scope.go:117] "RemoveContainer" containerID="d7d294e1c7be7a054cb72311f4c8f259dd90aedcf039dcbb12f05bce0acb0190"
Nov 28 10:28:16 crc kubenswrapper[4838]: I1128 10:28:16.655175 4838 scope.go:117] "RemoveContainer" containerID="8ea3d7f756744baeaf8a841103f3055446d42e8273f27480a363f2c30d253dd8"
Nov 28 10:28:16 crc kubenswrapper[4838]: I1128 10:28:16.682302 4838 scope.go:117] "RemoveContainer" containerID="8c43108d54dac7881693c1f429052dea6f2cd9b16d9ba8706bd53ed601704e96"
Nov 28 10:28:16 crc kubenswrapper[4838]: I1128 10:28:16.723008 4838 scope.go:117] "RemoveContainer" containerID="f503451f64a3d6ce7301f5078be9807294088199d72366ea053d013481055e92"
Nov 28 10:28:20 crc kubenswrapper[4838]: I1128 10:28:20.562437 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab"
Nov 28 10:28:20 crc kubenswrapper[4838]: E1128 10:28:20.562951 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:28:20 crc kubenswrapper[4838]: I1128 10:28:20.961978 4838 generic.go:334] "Generic (PLEG): container finished" podID="9b2a3bd0-add6-473d-b3fe-3f0e1f211230" containerID="d323e0c824079bddcc7b03bd7d94d47d1c10284ddf75ea050f3d02d6544b7146" exitCode=0
Nov 28 10:28:20 crc kubenswrapper[4838]: I1128 10:28:20.962048 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-8476r" event={"ID":"9b2a3bd0-add6-473d-b3fe-3f0e1f211230","Type":"ContainerDied","Data":"d323e0c824079bddcc7b03bd7d94d47d1c10284ddf75ea050f3d02d6544b7146"}
Nov 28 10:28:22 crc kubenswrapper[4838]: I1128 10:28:22.428064 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-8476r"
Nov 28 10:28:22 crc kubenswrapper[4838]: I1128 10:28:22.589288 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-flmqc\" (UniqueName: \"kubernetes.io/projected/9b2a3bd0-add6-473d-b3fe-3f0e1f211230-kube-api-access-flmqc\") pod \"9b2a3bd0-add6-473d-b3fe-3f0e1f211230\" (UID: \"9b2a3bd0-add6-473d-b3fe-3f0e1f211230\") "
Nov 28 10:28:22 crc kubenswrapper[4838]: I1128 10:28:22.589456 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9b2a3bd0-add6-473d-b3fe-3f0e1f211230-ssh-key-openstack-edpm-ipam\") pod \"9b2a3bd0-add6-473d-b3fe-3f0e1f211230\" (UID: \"9b2a3bd0-add6-473d-b3fe-3f0e1f211230\") "
Nov 28 10:28:22 crc kubenswrapper[4838]: I1128 10:28:22.589602 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/9b2a3bd0-add6-473d-b3fe-3f0e1f211230-inventory-0\") pod \"9b2a3bd0-add6-473d-b3fe-3f0e1f211230\" (UID: \"9b2a3bd0-add6-473d-b3fe-3f0e1f211230\") "
Nov 28 10:28:22 crc kubenswrapper[4838]: I1128 10:28:22.594814 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b2a3bd0-add6-473d-b3fe-3f0e1f211230-kube-api-access-flmqc" (OuterVolumeSpecName: "kube-api-access-flmqc") pod "9b2a3bd0-add6-473d-b3fe-3f0e1f211230" (UID: "9b2a3bd0-add6-473d-b3fe-3f0e1f211230"). InnerVolumeSpecName "kube-api-access-flmqc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:28:22 crc kubenswrapper[4838]: I1128 10:28:22.618920 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b2a3bd0-add6-473d-b3fe-3f0e1f211230-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "9b2a3bd0-add6-473d-b3fe-3f0e1f211230" (UID: "9b2a3bd0-add6-473d-b3fe-3f0e1f211230"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:28:22 crc kubenswrapper[4838]: I1128 10:28:22.623915 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b2a3bd0-add6-473d-b3fe-3f0e1f211230-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "9b2a3bd0-add6-473d-b3fe-3f0e1f211230" (UID: "9b2a3bd0-add6-473d-b3fe-3f0e1f211230"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:28:22 crc kubenswrapper[4838]: I1128 10:28:22.691742 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9b2a3bd0-add6-473d-b3fe-3f0e1f211230-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Nov 28 10:28:22 crc kubenswrapper[4838]: I1128 10:28:22.691803 4838 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/9b2a3bd0-add6-473d-b3fe-3f0e1f211230-inventory-0\") on node \"crc\" DevicePath \"\""
Nov 28 10:28:22 crc kubenswrapper[4838]: I1128 10:28:22.691822 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-flmqc\" (UniqueName: \"kubernetes.io/projected/9b2a3bd0-add6-473d-b3fe-3f0e1f211230-kube-api-access-flmqc\") on node \"crc\" DevicePath \"\""
Nov 28 10:28:22 crc kubenswrapper[4838]: I1128 10:28:22.988294 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-8476r" event={"ID":"9b2a3bd0-add6-473d-b3fe-3f0e1f211230","Type":"ContainerDied","Data":"202c7b8e139974df4683a042d805fcd57c2de461be195af2fc7ca1bcab49fb72"}
Nov 28 10:28:22 crc kubenswrapper[4838]: I1128 10:28:22.988817 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="202c7b8e139974df4683a042d805fcd57c2de461be195af2fc7ca1bcab49fb72"
Nov 28 10:28:22 crc kubenswrapper[4838]: I1128 10:28:22.988342 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-8476r"
Nov 28 10:28:23 crc kubenswrapper[4838]: I1128 10:28:23.076431 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-84ph6"]
Nov 28 10:28:23 crc kubenswrapper[4838]: I1128 10:28:23.085912 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-84ph6"]
Nov 28 10:28:23 crc kubenswrapper[4838]: I1128 10:28:23.093335 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt"]
Nov 28 10:28:23 crc kubenswrapper[4838]: E1128 10:28:23.093710 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b2a3bd0-add6-473d-b3fe-3f0e1f211230" containerName="ssh-known-hosts-edpm-deployment"
Nov 28 10:28:23 crc kubenswrapper[4838]: I1128 10:28:23.093803 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b2a3bd0-add6-473d-b3fe-3f0e1f211230" containerName="ssh-known-hosts-edpm-deployment"
Nov 28 10:28:23 crc kubenswrapper[4838]: I1128 10:28:23.094018 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b2a3bd0-add6-473d-b3fe-3f0e1f211230" containerName="ssh-known-hosts-edpm-deployment"
Nov 28 10:28:23 crc kubenswrapper[4838]: I1128 10:28:23.094613 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt"
Nov 28 10:28:23 crc kubenswrapper[4838]: I1128 10:28:23.097289 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn"
Nov 28 10:28:23 crc kubenswrapper[4838]: I1128 10:28:23.097355 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 28 10:28:23 crc kubenswrapper[4838]: I1128 10:28:23.097491 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 28 10:28:23 crc kubenswrapper[4838]: I1128 10:28:23.103643 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 28 10:28:23 crc kubenswrapper[4838]: I1128 10:28:23.122241 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt"]
Nov 28 10:28:23 crc kubenswrapper[4838]: I1128 10:28:23.200502 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8600285-a3ab-424a-8b28-94560f292e43-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-ghldt\" (UID: \"e8600285-a3ab-424a-8b28-94560f292e43\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt"
Nov 28 10:28:23 crc kubenswrapper[4838]: I1128 10:28:23.200552 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e8600285-a3ab-424a-8b28-94560f292e43-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-ghldt\" (UID: \"e8600285-a3ab-424a-8b28-94560f292e43\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt"
Nov 28 10:28:23 crc kubenswrapper[4838]: I1128 10:28:23.200692 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cc9ln\" (UniqueName: \"kubernetes.io/projected/e8600285-a3ab-424a-8b28-94560f292e43-kube-api-access-cc9ln\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-ghldt\" (UID: \"e8600285-a3ab-424a-8b28-94560f292e43\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt"
Nov 28 10:28:23 crc kubenswrapper[4838]: I1128 10:28:23.303532 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8600285-a3ab-424a-8b28-94560f292e43-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-ghldt\" (UID: \"e8600285-a3ab-424a-8b28-94560f292e43\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt"
Nov 28 10:28:23 crc kubenswrapper[4838]: I1128 10:28:23.303614 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e8600285-a3ab-424a-8b28-94560f292e43-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-ghldt\" (UID: \"e8600285-a3ab-424a-8b28-94560f292e43\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt"
Nov 28 10:28:23 crc kubenswrapper[4838]: I1128 10:28:23.303973 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cc9ln\" (UniqueName: \"kubernetes.io/projected/e8600285-a3ab-424a-8b28-94560f292e43-kube-api-access-cc9ln\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-ghldt\" (UID: \"e8600285-a3ab-424a-8b28-94560f292e43\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt"
Nov 28 10:28:23 crc kubenswrapper[4838]: I1128 10:28:23.316365 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e8600285-a3ab-424a-8b28-94560f292e43-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-ghldt\" (UID: \"e8600285-a3ab-424a-8b28-94560f292e43\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt"
Nov 28 10:28:23 crc kubenswrapper[4838]: I1128 10:28:23.326008 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8600285-a3ab-424a-8b28-94560f292e43-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-ghldt\" (UID: \"e8600285-a3ab-424a-8b28-94560f292e43\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt"
Nov 28 10:28:23 crc kubenswrapper[4838]: I1128 10:28:23.359498 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cc9ln\" (UniqueName: \"kubernetes.io/projected/e8600285-a3ab-424a-8b28-94560f292e43-kube-api-access-cc9ln\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-ghldt\" (UID: \"e8600285-a3ab-424a-8b28-94560f292e43\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt"
Nov 28 10:28:23 crc kubenswrapper[4838]: I1128 10:28:23.418792 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt"
Nov 28 10:28:24 crc kubenswrapper[4838]: I1128 10:28:24.368188 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt"]
Nov 28 10:28:24 crc kubenswrapper[4838]: I1128 10:28:24.582804 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61223154-98ac-45e7-af63-714d3c3cb7d7" path="/var/lib/kubelet/pods/61223154-98ac-45e7-af63-714d3c3cb7d7/volumes"
Nov 28 10:28:25 crc kubenswrapper[4838]: I1128 10:28:25.007679 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt" event={"ID":"e8600285-a3ab-424a-8b28-94560f292e43","Type":"ContainerStarted","Data":"356e4c97b09e6dd9a67c846b18b372fac4d4dcc334165ed62288cae73d595724"}
Nov 28 10:28:26 crc kubenswrapper[4838]: I1128 10:28:26.019776 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt" event={"ID":"e8600285-a3ab-424a-8b28-94560f292e43","Type":"ContainerStarted","Data":"0f8a47eb02a89f674bf42fb3ff1d041cb6a372e412565529ac9dd10cb4091e40"}
Nov 28 10:28:26 crc kubenswrapper[4838]: I1128 10:28:26.043028 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt" podStartSLOduration=2.583828951 podStartE2EDuration="3.04300982s" podCreationTimestamp="2025-11-28 10:28:23 +0000 UTC" firstStartedPulling="2025-11-28 10:28:24.368010797 +0000 UTC m=+1876.066984967" lastFinishedPulling="2025-11-28 10:28:24.827191646 +0000 UTC m=+1876.526165836" observedRunningTime="2025-11-28 10:28:26.040381857 +0000 UTC m=+1877.739356067" watchObservedRunningTime="2025-11-28 10:28:26.04300982 +0000 UTC m=+1877.741983990"
Nov 28 10:28:31 crc kubenswrapper[4838]: I1128 10:28:31.562818 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab"
Nov 28 10:28:31 crc kubenswrapper[4838]: E1128 10:28:31.564231 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:28:35 crc kubenswrapper[4838]: I1128 10:28:35.120660 4838 generic.go:334] "Generic (PLEG): container finished" podID="e8600285-a3ab-424a-8b28-94560f292e43" containerID="0f8a47eb02a89f674bf42fb3ff1d041cb6a372e412565529ac9dd10cb4091e40" exitCode=0
Nov 28 10:28:35 crc kubenswrapper[4838]: I1128 10:28:35.120786 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt" event={"ID":"e8600285-a3ab-424a-8b28-94560f292e43","Type":"ContainerDied","Data":"0f8a47eb02a89f674bf42fb3ff1d041cb6a372e412565529ac9dd10cb4091e40"}
Nov 28 10:28:36 crc kubenswrapper[4838]: I1128 10:28:36.601923 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt"
Nov 28 10:28:36 crc kubenswrapper[4838]: I1128 10:28:36.711082 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8600285-a3ab-424a-8b28-94560f292e43-inventory\") pod \"e8600285-a3ab-424a-8b28-94560f292e43\" (UID: \"e8600285-a3ab-424a-8b28-94560f292e43\") "
Nov 28 10:28:36 crc kubenswrapper[4838]: I1128 10:28:36.711214 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cc9ln\" (UniqueName: \"kubernetes.io/projected/e8600285-a3ab-424a-8b28-94560f292e43-kube-api-access-cc9ln\") pod \"e8600285-a3ab-424a-8b28-94560f292e43\" (UID: \"e8600285-a3ab-424a-8b28-94560f292e43\") "
Nov 28 10:28:36 crc kubenswrapper[4838]: I1128 10:28:36.711261 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e8600285-a3ab-424a-8b28-94560f292e43-ssh-key\") pod \"e8600285-a3ab-424a-8b28-94560f292e43\" (UID: \"e8600285-a3ab-424a-8b28-94560f292e43\") "
Nov 28 10:28:36 crc kubenswrapper[4838]: I1128 10:28:36.719357 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8600285-a3ab-424a-8b28-94560f292e43-kube-api-access-cc9ln" (OuterVolumeSpecName: "kube-api-access-cc9ln") pod "e8600285-a3ab-424a-8b28-94560f292e43" (UID: "e8600285-a3ab-424a-8b28-94560f292e43"). InnerVolumeSpecName "kube-api-access-cc9ln". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:28:36 crc kubenswrapper[4838]: I1128 10:28:36.752704 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8600285-a3ab-424a-8b28-94560f292e43-inventory" (OuterVolumeSpecName: "inventory") pod "e8600285-a3ab-424a-8b28-94560f292e43" (UID: "e8600285-a3ab-424a-8b28-94560f292e43"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:28:36 crc kubenswrapper[4838]: I1128 10:28:36.756389 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8600285-a3ab-424a-8b28-94560f292e43-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e8600285-a3ab-424a-8b28-94560f292e43" (UID: "e8600285-a3ab-424a-8b28-94560f292e43"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:28:36 crc kubenswrapper[4838]: I1128 10:28:36.813516 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8600285-a3ab-424a-8b28-94560f292e43-inventory\") on node \"crc\" DevicePath \"\""
Nov 28 10:28:36 crc kubenswrapper[4838]: I1128 10:28:36.813565 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cc9ln\" (UniqueName: \"kubernetes.io/projected/e8600285-a3ab-424a-8b28-94560f292e43-kube-api-access-cc9ln\") on node \"crc\" DevicePath \"\""
Nov 28 10:28:36 crc kubenswrapper[4838]: I1128 10:28:36.813590 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e8600285-a3ab-424a-8b28-94560f292e43-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 28 10:28:37 crc kubenswrapper[4838]: I1128 10:28:37.152482 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt" event={"ID":"e8600285-a3ab-424a-8b28-94560f292e43","Type":"ContainerDied","Data":"356e4c97b09e6dd9a67c846b18b372fac4d4dcc334165ed62288cae73d595724"}
Nov 28 10:28:37 crc kubenswrapper[4838]: I1128 10:28:37.152566 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="356e4c97b09e6dd9a67c846b18b372fac4d4dcc334165ed62288cae73d595724"
Nov 28 10:28:37 crc kubenswrapper[4838]: I1128 10:28:37.152700 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt"
Nov 28 10:28:37 crc kubenswrapper[4838]: I1128 10:28:37.283364 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz"]
Nov 28 10:28:37 crc kubenswrapper[4838]: E1128 10:28:37.283846 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8600285-a3ab-424a-8b28-94560f292e43" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Nov 28 10:28:37 crc kubenswrapper[4838]: I1128 10:28:37.283869 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8600285-a3ab-424a-8b28-94560f292e43" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Nov 28 10:28:37 crc kubenswrapper[4838]: I1128 10:28:37.284122 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8600285-a3ab-424a-8b28-94560f292e43" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Nov 28 10:28:37 crc kubenswrapper[4838]: I1128 10:28:37.284868 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz"
Nov 28 10:28:37 crc kubenswrapper[4838]: I1128 10:28:37.289144 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 28 10:28:37 crc kubenswrapper[4838]: I1128 10:28:37.289454 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 28 10:28:37 crc kubenswrapper[4838]: I1128 10:28:37.289677 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 28 10:28:37 crc kubenswrapper[4838]: I1128 10:28:37.289865 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn"
Nov 28 10:28:37 crc kubenswrapper[4838]: I1128 10:28:37.300840 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz"]
Nov 28 10:28:37 crc kubenswrapper[4838]: I1128 10:28:37.325117 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6da99ea6-330f-43fa-8586-fbfed3a9a4d1-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz\" (UID: \"6da99ea6-330f-43fa-8586-fbfed3a9a4d1\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz"
Nov 28 10:28:37 crc kubenswrapper[4838]: I1128 10:28:37.325169 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwr88\" (UniqueName: \"kubernetes.io/projected/6da99ea6-330f-43fa-8586-fbfed3a9a4d1-kube-api-access-kwr88\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz\" (UID: \"6da99ea6-330f-43fa-8586-fbfed3a9a4d1\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz"
Nov 28 10:28:37 crc kubenswrapper[4838]: I1128 10:28:37.325202 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6da99ea6-330f-43fa-8586-fbfed3a9a4d1-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz\" (UID: \"6da99ea6-330f-43fa-8586-fbfed3a9a4d1\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz"
Nov 28 10:28:37 crc kubenswrapper[4838]: I1128 10:28:37.427633 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6da99ea6-330f-43fa-8586-fbfed3a9a4d1-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz\" (UID: \"6da99ea6-330f-43fa-8586-fbfed3a9a4d1\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz"
Nov 28 10:28:37 crc kubenswrapper[4838]: I1128 10:28:37.427673 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwr88\" (UniqueName: \"kubernetes.io/projected/6da99ea6-330f-43fa-8586-fbfed3a9a4d1-kube-api-access-kwr88\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz\" (UID: \"6da99ea6-330f-43fa-8586-fbfed3a9a4d1\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz"
Nov 28 10:28:37 crc kubenswrapper[4838]: I1128 10:28:37.427695 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6da99ea6-330f-43fa-8586-fbfed3a9a4d1-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz\" (UID: \"6da99ea6-330f-43fa-8586-fbfed3a9a4d1\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz"
Nov 28 10:28:37 crc kubenswrapper[4838]: I1128 10:28:37.431619 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6da99ea6-330f-43fa-8586-fbfed3a9a4d1-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz\" (UID: \"6da99ea6-330f-43fa-8586-fbfed3a9a4d1\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz"
Nov 28 10:28:37 crc kubenswrapper[4838]: I1128 10:28:37.431858 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6da99ea6-330f-43fa-8586-fbfed3a9a4d1-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz\" (UID: \"6da99ea6-330f-43fa-8586-fbfed3a9a4d1\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz"
Nov 28 10:28:37 crc kubenswrapper[4838]: I1128 10:28:37.447927 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwr88\" (UniqueName: \"kubernetes.io/projected/6da99ea6-330f-43fa-8586-fbfed3a9a4d1-kube-api-access-kwr88\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz\" (UID: \"6da99ea6-330f-43fa-8586-fbfed3a9a4d1\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz"
Nov 28 10:28:37 crc kubenswrapper[4838]: I1128 10:28:37.650663 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz"
Nov 28 10:28:38 crc kubenswrapper[4838]: I1128 10:28:38.060286 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz"]
Nov 28 10:28:38 crc kubenswrapper[4838]: W1128 10:28:38.062061 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6da99ea6_330f_43fa_8586_fbfed3a9a4d1.slice/crio-2deb71467429570191eea0f3b349acc1afb1ef75ced4e76434a818514922c1b9 WatchSource:0}: Error finding container 2deb71467429570191eea0f3b349acc1afb1ef75ced4e76434a818514922c1b9: Status 404 returned error can't find the container with id 2deb71467429570191eea0f3b349acc1afb1ef75ced4e76434a818514922c1b9
Nov 28 10:28:38 crc kubenswrapper[4838]: I1128 10:28:38.162966 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz" event={"ID":"6da99ea6-330f-43fa-8586-fbfed3a9a4d1","Type":"ContainerStarted","Data":"2deb71467429570191eea0f3b349acc1afb1ef75ced4e76434a818514922c1b9"}
Nov 28 10:28:40 crc kubenswrapper[4838]: I1128 10:28:40.190545 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz" event={"ID":"6da99ea6-330f-43fa-8586-fbfed3a9a4d1","Type":"ContainerStarted","Data":"099284e5e0e738e61107a3db7a5198a5fbf09832d7385fbce4d250d5f911cd50"}
Nov 28 10:28:40 crc kubenswrapper[4838]: I1128 10:28:40.212893 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz" podStartSLOduration=2.220877133 podStartE2EDuration="3.212878402s" podCreationTimestamp="2025-11-28 10:28:37 +0000 UTC" firstStartedPulling="2025-11-28 10:28:38.066978926 +0000 UTC m=+1889.765953136" lastFinishedPulling="2025-11-28 10:28:39.058980235 +0000 UTC m=+1890.757954405" observedRunningTime="2025-11-28 10:28:40.210332362 +0000 UTC m=+1891.909306552" watchObservedRunningTime="2025-11-28 10:28:40.212878402 +0000 UTC m=+1891.911852572"
Nov 28 10:28:42 crc kubenswrapper[4838]: I1128 10:28:42.562840 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab"
Nov 28 10:28:42 crc kubenswrapper[4838]: E1128 10:28:42.563572 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:28:45 crc kubenswrapper[4838]: I1128 10:28:45.047475 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-m8nk4"]
Nov 28 10:28:45 crc kubenswrapper[4838]: I1128 10:28:45.058270 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-m8nk4"]
Nov 28 10:28:46 crc kubenswrapper[4838]: I1128 10:28:46.576332 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7190b24-4e06-4c59-9498-33d64a31067d" path="/var/lib/kubelet/pods/c7190b24-4e06-4c59-9498-33d64a31067d/volumes"
Nov 28 10:28:47 crc kubenswrapper[4838]: I1128 10:28:47.045830 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-p7qxj"]
Nov 28 10:28:47 crc kubenswrapper[4838]: I1128 10:28:47.064330 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-p7qxj"]
Nov 28 10:28:48 crc kubenswrapper[4838]: I1128 10:28:48.587787 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a6d8b81-2f74-472c-b289-f08fe548ec2f" path="/var/lib/kubelet/pods/2a6d8b81-2f74-472c-b289-f08fe548ec2f/volumes"
Nov 28 10:28:50 crc kubenswrapper[4838]: I1128 10:28:50.321817 4838 generic.go:334] "Generic (PLEG): container finished" podID="6da99ea6-330f-43fa-8586-fbfed3a9a4d1" containerID="099284e5e0e738e61107a3db7a5198a5fbf09832d7385fbce4d250d5f911cd50" exitCode=0
Nov 28 10:28:50 crc kubenswrapper[4838]: I1128 10:28:50.321925 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz" event={"ID":"6da99ea6-330f-43fa-8586-fbfed3a9a4d1","Type":"ContainerDied","Data":"099284e5e0e738e61107a3db7a5198a5fbf09832d7385fbce4d250d5f911cd50"}
Nov 28 10:28:51 crc kubenswrapper[4838]: I1128 10:28:51.830149 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz"
Nov 28 10:28:51 crc kubenswrapper[4838]: I1128 10:28:51.850609 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kwr88\" (UniqueName: \"kubernetes.io/projected/6da99ea6-330f-43fa-8586-fbfed3a9a4d1-kube-api-access-kwr88\") pod \"6da99ea6-330f-43fa-8586-fbfed3a9a4d1\" (UID: \"6da99ea6-330f-43fa-8586-fbfed3a9a4d1\") "
Nov 28 10:28:51 crc kubenswrapper[4838]: I1128 10:28:51.850800 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6da99ea6-330f-43fa-8586-fbfed3a9a4d1-ssh-key\") pod \"6da99ea6-330f-43fa-8586-fbfed3a9a4d1\" (UID: \"6da99ea6-330f-43fa-8586-fbfed3a9a4d1\") "
Nov 28 10:28:51 crc kubenswrapper[4838]: I1128 10:28:51.850833 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6da99ea6-330f-43fa-8586-fbfed3a9a4d1-inventory\") pod \"6da99ea6-330f-43fa-8586-fbfed3a9a4d1\" (UID: \"6da99ea6-330f-43fa-8586-fbfed3a9a4d1\") "
Nov 28 10:28:51 crc kubenswrapper[4838]: I1128 10:28:51.864128 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6da99ea6-330f-43fa-8586-fbfed3a9a4d1-kube-api-access-kwr88" (OuterVolumeSpecName: "kube-api-access-kwr88") pod "6da99ea6-330f-43fa-8586-fbfed3a9a4d1" (UID: "6da99ea6-330f-43fa-8586-fbfed3a9a4d1"). InnerVolumeSpecName "kube-api-access-kwr88". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:28:51 crc kubenswrapper[4838]: I1128 10:28:51.890617 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6da99ea6-330f-43fa-8586-fbfed3a9a4d1-inventory" (OuterVolumeSpecName: "inventory") pod "6da99ea6-330f-43fa-8586-fbfed3a9a4d1" (UID: "6da99ea6-330f-43fa-8586-fbfed3a9a4d1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:28:51 crc kubenswrapper[4838]: I1128 10:28:51.895259 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6da99ea6-330f-43fa-8586-fbfed3a9a4d1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6da99ea6-330f-43fa-8586-fbfed3a9a4d1" (UID: "6da99ea6-330f-43fa-8586-fbfed3a9a4d1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:28:51 crc kubenswrapper[4838]: I1128 10:28:51.958638 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6da99ea6-330f-43fa-8586-fbfed3a9a4d1-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 28 10:28:51 crc kubenswrapper[4838]: I1128 10:28:51.958701 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6da99ea6-330f-43fa-8586-fbfed3a9a4d1-inventory\") on node \"crc\" DevicePath \"\""
Nov 28 10:28:51 crc kubenswrapper[4838]: I1128 10:28:51.958751 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kwr88\" (UniqueName: \"kubernetes.io/projected/6da99ea6-330f-43fa-8586-fbfed3a9a4d1-kube-api-access-kwr88\") on node \"crc\" DevicePath \"\""
Nov 28 10:28:52 crc kubenswrapper[4838]: I1128 10:28:52.344140 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz" event={"ID":"6da99ea6-330f-43fa-8586-fbfed3a9a4d1","Type":"ContainerDied","Data":"2deb71467429570191eea0f3b349acc1afb1ef75ced4e76434a818514922c1b9"}
Nov 28 10:28:52 crc kubenswrapper[4838]: I1128 10:28:52.344192 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2deb71467429570191eea0f3b349acc1afb1ef75ced4e76434a818514922c1b9"
Nov 28 10:28:52 crc kubenswrapper[4838]: I1128 10:28:52.344230 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz"
Nov 28 10:28:57 crc kubenswrapper[4838]: I1128 10:28:57.563214 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab"
Nov 28 10:28:57 crc kubenswrapper[4838]: E1128 10:28:57.564640 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:29:12 crc kubenswrapper[4838]: I1128 10:29:12.562764 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab"
Nov 28 10:29:12 crc kubenswrapper[4838]: E1128 10:29:12.563597 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:29:16 crc kubenswrapper[4838]: I1128 10:29:16.923248 4838 scope.go:117] "RemoveContainer" containerID="fac63343239611b1603ce78b83b375c44ea11140c340c65753db0a1c491d5f48"
Nov 28 10:29:16 crc kubenswrapper[4838]: I1128 10:29:16.991096 4838 scope.go:117] "RemoveContainer" containerID="52f4d7bae6dd59014073affb4fb7b22afdce4f3fb975f9f5ee9c730f8edadab0"
Nov 28 10:29:17 crc kubenswrapper[4838]: I1128 10:29:17.022854 4838 scope.go:117] "RemoveContainer" containerID="0a4317f4ad9fe07c39b10f20cae3421c50a515a76bf5fc88db86705e2bdf6788"
Nov 28 10:29:25 crc kubenswrapper[4838]: I1128 10:29:25.563298 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab"
Nov 28 10:29:25 crc kubenswrapper[4838]: E1128 10:29:25.564450 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:29:29 crc kubenswrapper[4838]: I1128 10:29:29.058006 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-cv5n2"]
Nov 28 10:29:29 crc kubenswrapper[4838]: I1128 10:29:29.068863 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-cv5n2"]
Nov 28 10:29:30 crc kubenswrapper[4838]: I1128 10:29:30.579591 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="768b2764-c429-421c-8def-15e4caee8eb8" path="/var/lib/kubelet/pods/768b2764-c429-421c-8def-15e4caee8eb8/volumes"
Nov 28 10:29:38 crc kubenswrapper[4838]: I1128 10:29:38.568503 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab"
Nov 28 10:29:38 crc kubenswrapper[4838]: E1128 10:29:38.569285 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:29:50 crc kubenswrapper[4838]: I1128 10:29:50.562227 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab"
Nov 28 10:29:50 crc kubenswrapper[4838]: E1128 10:29:50.563236 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:30:00 crc kubenswrapper[4838]: I1128 10:30:00.167777 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405430-q6b6w"]
Nov 28 10:30:00 crc kubenswrapper[4838]: E1128 10:30:00.168818 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6da99ea6-330f-43fa-8586-fbfed3a9a4d1" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam"
Nov 28 10:30:00 crc kubenswrapper[4838]: I1128 10:30:00.168840 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="6da99ea6-330f-43fa-8586-fbfed3a9a4d1" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam"
Nov 28 10:30:00 crc kubenswrapper[4838]: I1128 10:30:00.169106 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="6da99ea6-330f-43fa-8586-fbfed3a9a4d1" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam"
Nov 28 10:30:00 crc kubenswrapper[4838]: I1128 10:30:00.170035 4838 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405430-q6b6w" Nov 28 10:30:00 crc kubenswrapper[4838]: I1128 10:30:00.172521 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 10:30:00 crc kubenswrapper[4838]: I1128 10:30:00.173037 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 10:30:00 crc kubenswrapper[4838]: I1128 10:30:00.198879 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405430-q6b6w"] Nov 28 10:30:00 crc kubenswrapper[4838]: I1128 10:30:00.343364 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4g9l\" (UniqueName: \"kubernetes.io/projected/87f04482-860f-49ec-ade3-aebc08f2887c-kube-api-access-n4g9l\") pod \"collect-profiles-29405430-q6b6w\" (UID: \"87f04482-860f-49ec-ade3-aebc08f2887c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405430-q6b6w" Nov 28 10:30:00 crc kubenswrapper[4838]: I1128 10:30:00.343510 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87f04482-860f-49ec-ade3-aebc08f2887c-config-volume\") pod \"collect-profiles-29405430-q6b6w\" (UID: \"87f04482-860f-49ec-ade3-aebc08f2887c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405430-q6b6w" Nov 28 10:30:00 crc kubenswrapper[4838]: I1128 10:30:00.344340 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/87f04482-860f-49ec-ade3-aebc08f2887c-secret-volume\") pod \"collect-profiles-29405430-q6b6w\" (UID: \"87f04482-860f-49ec-ade3-aebc08f2887c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405430-q6b6w" Nov 28 10:30:00 crc kubenswrapper[4838]: I1128 10:30:00.445552 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/87f04482-860f-49ec-ade3-aebc08f2887c-secret-volume\") pod \"collect-profiles-29405430-q6b6w\" (UID: \"87f04482-860f-49ec-ade3-aebc08f2887c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405430-q6b6w" Nov 28 10:30:00 crc kubenswrapper[4838]: I1128 10:30:00.445629 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4g9l\" (UniqueName: \"kubernetes.io/projected/87f04482-860f-49ec-ade3-aebc08f2887c-kube-api-access-n4g9l\") pod \"collect-profiles-29405430-q6b6w\" (UID: \"87f04482-860f-49ec-ade3-aebc08f2887c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405430-q6b6w" Nov 28 10:30:00 crc kubenswrapper[4838]: I1128 10:30:00.445745 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87f04482-860f-49ec-ade3-aebc08f2887c-config-volume\") pod \"collect-profiles-29405430-q6b6w\" (UID: \"87f04482-860f-49ec-ade3-aebc08f2887c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405430-q6b6w" Nov 28 10:30:00 crc kubenswrapper[4838]: I1128 10:30:00.446948 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87f04482-860f-49ec-ade3-aebc08f2887c-config-volume\") pod 
\"collect-profiles-29405430-q6b6w\" (UID: \"87f04482-860f-49ec-ade3-aebc08f2887c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405430-q6b6w" Nov 28 10:30:00 crc kubenswrapper[4838]: I1128 10:30:00.451628 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/87f04482-860f-49ec-ade3-aebc08f2887c-secret-volume\") pod \"collect-profiles-29405430-q6b6w\" (UID: \"87f04482-860f-49ec-ade3-aebc08f2887c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405430-q6b6w" Nov 28 10:30:00 crc kubenswrapper[4838]: I1128 10:30:00.465110 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4g9l\" (UniqueName: \"kubernetes.io/projected/87f04482-860f-49ec-ade3-aebc08f2887c-kube-api-access-n4g9l\") pod \"collect-profiles-29405430-q6b6w\" (UID: \"87f04482-860f-49ec-ade3-aebc08f2887c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405430-q6b6w" Nov 28 10:30:00 crc kubenswrapper[4838]: I1128 10:30:00.501433 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405430-q6b6w" Nov 28 10:30:00 crc kubenswrapper[4838]: I1128 10:30:00.973676 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405430-q6b6w"] Nov 28 10:30:01 crc kubenswrapper[4838]: I1128 10:30:01.109119 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405430-q6b6w" event={"ID":"87f04482-860f-49ec-ade3-aebc08f2887c","Type":"ContainerStarted","Data":"9cb03a51a9f207812b6d7ef59ce5a7927f29afbc9e6c301a96a20aa4a400000b"} Nov 28 10:30:02 crc kubenswrapper[4838]: I1128 10:30:02.122800 4838 generic.go:334] "Generic (PLEG): container finished" podID="87f04482-860f-49ec-ade3-aebc08f2887c" containerID="ed54aeed2cce2fcd18ac1cb31258dd61eaaebb97db5e04a6255d1c6ff9500798" exitCode=0 Nov 28 10:30:02 crc kubenswrapper[4838]: I1128 10:30:02.122955 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405430-q6b6w" event={"ID":"87f04482-860f-49ec-ade3-aebc08f2887c","Type":"ContainerDied","Data":"ed54aeed2cce2fcd18ac1cb31258dd61eaaebb97db5e04a6255d1c6ff9500798"} Nov 28 10:30:03 crc kubenswrapper[4838]: I1128 10:30:03.508695 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405430-q6b6w" Nov 28 10:30:03 crc kubenswrapper[4838]: I1128 10:30:03.659361 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n4g9l\" (UniqueName: \"kubernetes.io/projected/87f04482-860f-49ec-ade3-aebc08f2887c-kube-api-access-n4g9l\") pod \"87f04482-860f-49ec-ade3-aebc08f2887c\" (UID: \"87f04482-860f-49ec-ade3-aebc08f2887c\") " Nov 28 10:30:03 crc kubenswrapper[4838]: I1128 10:30:03.659452 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87f04482-860f-49ec-ade3-aebc08f2887c-config-volume\") pod \"87f04482-860f-49ec-ade3-aebc08f2887c\" (UID: \"87f04482-860f-49ec-ade3-aebc08f2887c\") " Nov 28 10:30:03 crc kubenswrapper[4838]: I1128 10:30:03.659550 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/87f04482-860f-49ec-ade3-aebc08f2887c-secret-volume\") pod \"87f04482-860f-49ec-ade3-aebc08f2887c\" (UID: \"87f04482-860f-49ec-ade3-aebc08f2887c\") " Nov 28 10:30:03 crc kubenswrapper[4838]: I1128 10:30:03.660610 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87f04482-860f-49ec-ade3-aebc08f2887c-config-volume" (OuterVolumeSpecName: "config-volume") pod "87f04482-860f-49ec-ade3-aebc08f2887c" (UID: "87f04482-860f-49ec-ade3-aebc08f2887c"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:30:03 crc kubenswrapper[4838]: I1128 10:30:03.665467 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87f04482-860f-49ec-ade3-aebc08f2887c-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "87f04482-860f-49ec-ade3-aebc08f2887c" (UID: "87f04482-860f-49ec-ade3-aebc08f2887c"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:30:03 crc kubenswrapper[4838]: I1128 10:30:03.666940 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87f04482-860f-49ec-ade3-aebc08f2887c-kube-api-access-n4g9l" (OuterVolumeSpecName: "kube-api-access-n4g9l") pod "87f04482-860f-49ec-ade3-aebc08f2887c" (UID: "87f04482-860f-49ec-ade3-aebc08f2887c"). InnerVolumeSpecName "kube-api-access-n4g9l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:30:03 crc kubenswrapper[4838]: I1128 10:30:03.763240 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n4g9l\" (UniqueName: \"kubernetes.io/projected/87f04482-860f-49ec-ade3-aebc08f2887c-kube-api-access-n4g9l\") on node \"crc\" DevicePath \"\"" Nov 28 10:30:03 crc kubenswrapper[4838]: I1128 10:30:03.763406 4838 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87f04482-860f-49ec-ade3-aebc08f2887c-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 10:30:03 crc kubenswrapper[4838]: I1128 10:30:03.763445 4838 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/87f04482-860f-49ec-ade3-aebc08f2887c-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 10:30:04 crc kubenswrapper[4838]: I1128 10:30:04.149583 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405430-q6b6w" event={"ID":"87f04482-860f-49ec-ade3-aebc08f2887c","Type":"ContainerDied","Data":"9cb03a51a9f207812b6d7ef59ce5a7927f29afbc9e6c301a96a20aa4a400000b"} Nov 28 10:30:04 crc kubenswrapper[4838]: I1128 10:30:04.149640 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9cb03a51a9f207812b6d7ef59ce5a7927f29afbc9e6c301a96a20aa4a400000b" Nov 28 10:30:04 crc kubenswrapper[4838]: I1128 10:30:04.149646 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405430-q6b6w" Nov 28 10:30:05 crc kubenswrapper[4838]: I1128 10:30:05.563778 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab" Nov 28 10:30:05 crc kubenswrapper[4838]: E1128 10:30:05.564920 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:30:17 crc kubenswrapper[4838]: I1128 10:30:17.140109 4838 scope.go:117] "RemoveContainer" containerID="998d9df52c8721870fc8421468bc009017bdfaa5ae767988063ae7fdca6086a7" Nov 28 10:30:18 crc kubenswrapper[4838]: I1128 10:30:18.572367 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab" Nov 28 10:30:18 crc kubenswrapper[4838]: E1128 10:30:18.573128 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:30:33 crc kubenswrapper[4838]: I1128 10:30:33.562623 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab" Nov 28 10:30:33 crc kubenswrapper[4838]: E1128 10:30:33.563698 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:30:47 crc kubenswrapper[4838]: I1128 10:30:47.562037 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab" Nov 28 10:30:47 crc kubenswrapper[4838]: E1128 10:30:47.562819 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:30:59 crc kubenswrapper[4838]: I1128 10:30:59.238169 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-m4hvs"] Nov 28 10:30:59 crc kubenswrapper[4838]: E1128 10:30:59.239447 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87f04482-860f-49ec-ade3-aebc08f2887c" containerName="collect-profiles" Nov 28 10:30:59 crc kubenswrapper[4838]: I1128 10:30:59.239470 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="87f04482-860f-49ec-ade3-aebc08f2887c" containerName="collect-profiles" Nov 28 10:30:59 crc kubenswrapper[4838]: I1128 10:30:59.239910 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="87f04482-860f-49ec-ade3-aebc08f2887c" containerName="collect-profiles" Nov 28 10:30:59 crc kubenswrapper[4838]: I1128 10:30:59.242512 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-m4hvs" Nov 28 10:30:59 crc kubenswrapper[4838]: I1128 10:30:59.266589 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-m4hvs"] Nov 28 10:30:59 crc kubenswrapper[4838]: I1128 10:30:59.349266 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c753d7f-a113-45e3-a1a4-f2346a7a0278-catalog-content\") pod \"redhat-operators-m4hvs\" (UID: \"4c753d7f-a113-45e3-a1a4-f2346a7a0278\") " pod="openshift-marketplace/redhat-operators-m4hvs" Nov 28 10:30:59 crc kubenswrapper[4838]: I1128 10:30:59.349351 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c753d7f-a113-45e3-a1a4-f2346a7a0278-utilities\") pod \"redhat-operators-m4hvs\" (UID: \"4c753d7f-a113-45e3-a1a4-f2346a7a0278\") " pod="openshift-marketplace/redhat-operators-m4hvs" Nov 28 10:30:59 crc kubenswrapper[4838]: I1128 10:30:59.349444 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4r89z\" (UniqueName: \"kubernetes.io/projected/4c753d7f-a113-45e3-a1a4-f2346a7a0278-kube-api-access-4r89z\") pod \"redhat-operators-m4hvs\" (UID: \"4c753d7f-a113-45e3-a1a4-f2346a7a0278\") " pod="openshift-marketplace/redhat-operators-m4hvs" Nov 28 10:30:59 crc kubenswrapper[4838]: I1128 10:30:59.451402 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c753d7f-a113-45e3-a1a4-f2346a7a0278-catalog-content\") pod \"redhat-operators-m4hvs\" (UID: \"4c753d7f-a113-45e3-a1a4-f2346a7a0278\") " pod="openshift-marketplace/redhat-operators-m4hvs" Nov 28 10:30:59 crc kubenswrapper[4838]: I1128 10:30:59.451909 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c753d7f-a113-45e3-a1a4-f2346a7a0278-utilities\") pod \"redhat-operators-m4hvs\" (UID: \"4c753d7f-a113-45e3-a1a4-f2346a7a0278\") " pod="openshift-marketplace/redhat-operators-m4hvs" Nov 28 10:30:59 crc kubenswrapper[4838]: I1128 10:30:59.452105 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4r89z\" (UniqueName: \"kubernetes.io/projected/4c753d7f-a113-45e3-a1a4-f2346a7a0278-kube-api-access-4r89z\") pod \"redhat-operators-m4hvs\" (UID: \"4c753d7f-a113-45e3-a1a4-f2346a7a0278\") " pod="openshift-marketplace/redhat-operators-m4hvs" Nov 28 10:30:59 crc kubenswrapper[4838]: I1128 10:30:59.452391 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c753d7f-a113-45e3-a1a4-f2346a7a0278-catalog-content\") pod \"redhat-operators-m4hvs\" (UID: \"4c753d7f-a113-45e3-a1a4-f2346a7a0278\") " pod="openshift-marketplace/redhat-operators-m4hvs" Nov 28 10:30:59 crc kubenswrapper[4838]: I1128 10:30:59.452479 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c753d7f-a113-45e3-a1a4-f2346a7a0278-utilities\") pod \"redhat-operators-m4hvs\" (UID: \"4c753d7f-a113-45e3-a1a4-f2346a7a0278\") " pod="openshift-marketplace/redhat-operators-m4hvs" Nov 28 10:30:59 crc kubenswrapper[4838]: I1128 10:30:59.482496 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-4r89z\" (UniqueName: \"kubernetes.io/projected/4c753d7f-a113-45e3-a1a4-f2346a7a0278-kube-api-access-4r89z\") pod \"redhat-operators-m4hvs\" (UID: \"4c753d7f-a113-45e3-a1a4-f2346a7a0278\") " pod="openshift-marketplace/redhat-operators-m4hvs" Nov 28 10:30:59 crc kubenswrapper[4838]: I1128 10:30:59.562507 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab" Nov 28 10:30:59 crc kubenswrapper[4838]: I1128 10:30:59.580686 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-m4hvs" Nov 28 10:31:00 crc kubenswrapper[4838]: I1128 10:31:00.044565 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-m4hvs"] Nov 28 10:31:00 crc kubenswrapper[4838]: W1128 10:31:00.053490 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4c753d7f_a113_45e3_a1a4_f2346a7a0278.slice/crio-79acb465ad9ace2618dcdfb0c9ae63202a217eeecf27f2cd037c6d0238f7e95f WatchSource:0}: Error finding container 79acb465ad9ace2618dcdfb0c9ae63202a217eeecf27f2cd037c6d0238f7e95f: Status 404 returned error can't find the container with id 79acb465ad9ace2618dcdfb0c9ae63202a217eeecf27f2cd037c6d0238f7e95f Nov 28 10:31:00 crc kubenswrapper[4838]: I1128 10:31:00.789575 4838 generic.go:334] "Generic (PLEG): container finished" podID="4c753d7f-a113-45e3-a1a4-f2346a7a0278" containerID="3fd9f00d5b98d03166392b7a483f9b20de65c9db713656d99d5fb91edb73602b" exitCode=0 Nov 28 10:31:00 crc kubenswrapper[4838]: I1128 10:31:00.789888 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m4hvs" event={"ID":"4c753d7f-a113-45e3-a1a4-f2346a7a0278","Type":"ContainerDied","Data":"3fd9f00d5b98d03166392b7a483f9b20de65c9db713656d99d5fb91edb73602b"} Nov 28 10:31:00 crc kubenswrapper[4838]: I1128 10:31:00.792362 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m4hvs" event={"ID":"4c753d7f-a113-45e3-a1a4-f2346a7a0278","Type":"ContainerStarted","Data":"79acb465ad9ace2618dcdfb0c9ae63202a217eeecf27f2cd037c6d0238f7e95f"} Nov 28 10:31:00 crc kubenswrapper[4838]: I1128 10:31:00.792026 4838 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 10:31:00 crc kubenswrapper[4838]: I1128 10:31:00.796215 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerStarted","Data":"5afd39ddd492746c0199d772a63313b45e117b315a570579a80048ab6e189d3f"} Nov 28 10:31:02 crc kubenswrapper[4838]: I1128 10:31:02.824688 4838 generic.go:334] "Generic (PLEG): container finished" podID="4c753d7f-a113-45e3-a1a4-f2346a7a0278" containerID="4c6b0829c533ff8c3432403a81b36b09a11a09a201fac36484b71a527ad5c0cf" exitCode=0 Nov 28 10:31:02 crc kubenswrapper[4838]: I1128 10:31:02.824977 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m4hvs" event={"ID":"4c753d7f-a113-45e3-a1a4-f2346a7a0278","Type":"ContainerDied","Data":"4c6b0829c533ff8c3432403a81b36b09a11a09a201fac36484b71a527ad5c0cf"} Nov 28 10:31:03 crc kubenswrapper[4838]: I1128 10:31:03.838942 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m4hvs" 
event={"ID":"4c753d7f-a113-45e3-a1a4-f2346a7a0278","Type":"ContainerStarted","Data":"6646dbaad86ea5aff754cf32d78bef992fc4ba55da78a90325f62d7b4c5ea6fe"} Nov 28 10:31:03 crc kubenswrapper[4838]: I1128 10:31:03.878491 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-m4hvs" podStartSLOduration=2.218812175 podStartE2EDuration="4.878458616s" podCreationTimestamp="2025-11-28 10:30:59 +0000 UTC" firstStartedPulling="2025-11-28 10:31:00.791810641 +0000 UTC m=+2032.490784811" lastFinishedPulling="2025-11-28 10:31:03.451457042 +0000 UTC m=+2035.150431252" observedRunningTime="2025-11-28 10:31:03.867307142 +0000 UTC m=+2035.566281382" watchObservedRunningTime="2025-11-28 10:31:03.878458616 +0000 UTC m=+2035.577432876" Nov 28 10:31:09 crc kubenswrapper[4838]: I1128 10:31:09.580954 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-m4hvs" Nov 28 10:31:09 crc kubenswrapper[4838]: I1128 10:31:09.581618 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-m4hvs" Nov 28 10:31:10 crc kubenswrapper[4838]: I1128 10:31:10.630622 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-m4hvs" podUID="4c753d7f-a113-45e3-a1a4-f2346a7a0278" containerName="registry-server" probeResult="failure" output=< Nov 28 10:31:10 crc kubenswrapper[4838]: timeout: failed to connect service ":50051" within 1s Nov 28 10:31:10 crc kubenswrapper[4838]: > Nov 28 10:31:17 crc kubenswrapper[4838]: I1128 10:31:17.234782 4838 scope.go:117] "RemoveContainer" containerID="70b74241048310c2760b716793739ac75306194738f917ca35aa39dda6ed4098" Nov 28 10:31:17 crc kubenswrapper[4838]: I1128 10:31:17.267451 4838 scope.go:117] "RemoveContainer" containerID="04f2954c3a931aa728b3c914f1a50d73aaf37c8ffc0b7ae04d32cf818b601bd8" Nov 28 10:31:17 crc kubenswrapper[4838]: I1128 10:31:17.319379 4838 scope.go:117] "RemoveContainer" containerID="e286faddb64ff53d76c13d35787965c4ddc0b66ff51c43693c41462310fee56b" Nov 28 10:31:19 crc kubenswrapper[4838]: I1128 10:31:19.674946 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-m4hvs" Nov 28 10:31:19 crc kubenswrapper[4838]: I1128 10:31:19.751771 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-m4hvs" Nov 28 10:31:19 crc kubenswrapper[4838]: I1128 10:31:19.910395 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-m4hvs"] Nov 28 10:31:21 crc kubenswrapper[4838]: I1128 10:31:21.014417 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-m4hvs" podUID="4c753d7f-a113-45e3-a1a4-f2346a7a0278" containerName="registry-server" containerID="cri-o://6646dbaad86ea5aff754cf32d78bef992fc4ba55da78a90325f62d7b4c5ea6fe" gracePeriod=2 Nov 28 10:31:21 crc kubenswrapper[4838]: I1128 10:31:21.513252 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-m4hvs" Nov 28 10:31:21 crc kubenswrapper[4838]: I1128 10:31:21.702943 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c753d7f-a113-45e3-a1a4-f2346a7a0278-utilities\") pod \"4c753d7f-a113-45e3-a1a4-f2346a7a0278\" (UID: \"4c753d7f-a113-45e3-a1a4-f2346a7a0278\") " Nov 28 10:31:21 crc kubenswrapper[4838]: I1128 10:31:21.703033 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4r89z\" (UniqueName: \"kubernetes.io/projected/4c753d7f-a113-45e3-a1a4-f2346a7a0278-kube-api-access-4r89z\") pod \"4c753d7f-a113-45e3-a1a4-f2346a7a0278\" (UID: \"4c753d7f-a113-45e3-a1a4-f2346a7a0278\") " Nov 28 10:31:21 crc kubenswrapper[4838]: I1128 10:31:21.703087 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c753d7f-a113-45e3-a1a4-f2346a7a0278-catalog-content\") pod \"4c753d7f-a113-45e3-a1a4-f2346a7a0278\" (UID: \"4c753d7f-a113-45e3-a1a4-f2346a7a0278\") " Nov 28 10:31:21 crc kubenswrapper[4838]: I1128 10:31:21.704004 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c753d7f-a113-45e3-a1a4-f2346a7a0278-utilities" (OuterVolumeSpecName: "utilities") pod "4c753d7f-a113-45e3-a1a4-f2346a7a0278" (UID: "4c753d7f-a113-45e3-a1a4-f2346a7a0278"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:31:21 crc kubenswrapper[4838]: I1128 10:31:21.712100 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c753d7f-a113-45e3-a1a4-f2346a7a0278-kube-api-access-4r89z" (OuterVolumeSpecName: "kube-api-access-4r89z") pod "4c753d7f-a113-45e3-a1a4-f2346a7a0278" (UID: "4c753d7f-a113-45e3-a1a4-f2346a7a0278"). InnerVolumeSpecName "kube-api-access-4r89z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:31:21 crc kubenswrapper[4838]: I1128 10:31:21.804825 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4r89z\" (UniqueName: \"kubernetes.io/projected/4c753d7f-a113-45e3-a1a4-f2346a7a0278-kube-api-access-4r89z\") on node \"crc\" DevicePath \"\"" Nov 28 10:31:21 crc kubenswrapper[4838]: I1128 10:31:21.804862 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c753d7f-a113-45e3-a1a4-f2346a7a0278-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:31:21 crc kubenswrapper[4838]: I1128 10:31:21.841364 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c753d7f-a113-45e3-a1a4-f2346a7a0278-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4c753d7f-a113-45e3-a1a4-f2346a7a0278" (UID: "4c753d7f-a113-45e3-a1a4-f2346a7a0278"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:31:21 crc kubenswrapper[4838]: I1128 10:31:21.906790 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c753d7f-a113-45e3-a1a4-f2346a7a0278-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:31:22 crc kubenswrapper[4838]: I1128 10:31:22.027826 4838 generic.go:334] "Generic (PLEG): container finished" podID="4c753d7f-a113-45e3-a1a4-f2346a7a0278" containerID="6646dbaad86ea5aff754cf32d78bef992fc4ba55da78a90325f62d7b4c5ea6fe" exitCode=0 Nov 28 10:31:22 crc kubenswrapper[4838]: I1128 10:31:22.027888 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m4hvs" event={"ID":"4c753d7f-a113-45e3-a1a4-f2346a7a0278","Type":"ContainerDied","Data":"6646dbaad86ea5aff754cf32d78bef992fc4ba55da78a90325f62d7b4c5ea6fe"} Nov 28 10:31:22 crc kubenswrapper[4838]: I1128 10:31:22.027978 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m4hvs" event={"ID":"4c753d7f-a113-45e3-a1a4-f2346a7a0278","Type":"ContainerDied","Data":"79acb465ad9ace2618dcdfb0c9ae63202a217eeecf27f2cd037c6d0238f7e95f"} Nov 28 10:31:22 crc kubenswrapper[4838]: I1128 10:31:22.028002 4838 scope.go:117] "RemoveContainer" containerID="6646dbaad86ea5aff754cf32d78bef992fc4ba55da78a90325f62d7b4c5ea6fe" Nov 28 10:31:22 crc kubenswrapper[4838]: I1128 10:31:22.027918 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-m4hvs" Nov 28 10:31:22 crc kubenswrapper[4838]: I1128 10:31:22.050282 4838 scope.go:117] "RemoveContainer" containerID="4c6b0829c533ff8c3432403a81b36b09a11a09a201fac36484b71a527ad5c0cf" Nov 28 10:31:22 crc kubenswrapper[4838]: I1128 10:31:22.069552 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-m4hvs"] Nov 28 10:31:22 crc kubenswrapper[4838]: I1128 10:31:22.078371 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-m4hvs"] Nov 28 10:31:22 crc kubenswrapper[4838]: I1128 10:31:22.099250 4838 scope.go:117] "RemoveContainer" containerID="3fd9f00d5b98d03166392b7a483f9b20de65c9db713656d99d5fb91edb73602b" Nov 28 10:31:22 crc kubenswrapper[4838]: I1128 10:31:22.121032 4838 scope.go:117] "RemoveContainer" containerID="6646dbaad86ea5aff754cf32d78bef992fc4ba55da78a90325f62d7b4c5ea6fe" Nov 28 10:31:22 crc kubenswrapper[4838]: E1128 10:31:22.121467 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6646dbaad86ea5aff754cf32d78bef992fc4ba55da78a90325f62d7b4c5ea6fe\": container with ID starting with 6646dbaad86ea5aff754cf32d78bef992fc4ba55da78a90325f62d7b4c5ea6fe not found: ID does not exist" containerID="6646dbaad86ea5aff754cf32d78bef992fc4ba55da78a90325f62d7b4c5ea6fe" Nov 28 10:31:22 crc kubenswrapper[4838]: I1128 10:31:22.121507 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6646dbaad86ea5aff754cf32d78bef992fc4ba55da78a90325f62d7b4c5ea6fe"} err="failed to get container status \"6646dbaad86ea5aff754cf32d78bef992fc4ba55da78a90325f62d7b4c5ea6fe\": rpc error: code = NotFound desc = could not find container \"6646dbaad86ea5aff754cf32d78bef992fc4ba55da78a90325f62d7b4c5ea6fe\": container with ID starting with 6646dbaad86ea5aff754cf32d78bef992fc4ba55da78a90325f62d7b4c5ea6fe not found: ID does not exist" Nov 28 10:31:22 crc 
kubenswrapper[4838]: I1128 10:31:22.121531 4838 scope.go:117] "RemoveContainer" containerID="4c6b0829c533ff8c3432403a81b36b09a11a09a201fac36484b71a527ad5c0cf" Nov 28 10:31:22 crc kubenswrapper[4838]: E1128 10:31:22.121987 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c6b0829c533ff8c3432403a81b36b09a11a09a201fac36484b71a527ad5c0cf\": container with ID starting with 4c6b0829c533ff8c3432403a81b36b09a11a09a201fac36484b71a527ad5c0cf not found: ID does not exist" containerID="4c6b0829c533ff8c3432403a81b36b09a11a09a201fac36484b71a527ad5c0cf" Nov 28 10:31:22 crc kubenswrapper[4838]: I1128 10:31:22.122025 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c6b0829c533ff8c3432403a81b36b09a11a09a201fac36484b71a527ad5c0cf"} err="failed to get container status \"4c6b0829c533ff8c3432403a81b36b09a11a09a201fac36484b71a527ad5c0cf\": rpc error: code = NotFound desc = could not find container \"4c6b0829c533ff8c3432403a81b36b09a11a09a201fac36484b71a527ad5c0cf\": container with ID starting with 4c6b0829c533ff8c3432403a81b36b09a11a09a201fac36484b71a527ad5c0cf not found: ID does not exist" Nov 28 10:31:22 crc kubenswrapper[4838]: I1128 10:31:22.122051 4838 scope.go:117] "RemoveContainer" containerID="3fd9f00d5b98d03166392b7a483f9b20de65c9db713656d99d5fb91edb73602b" Nov 28 10:31:22 crc kubenswrapper[4838]: E1128 10:31:22.122424 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3fd9f00d5b98d03166392b7a483f9b20de65c9db713656d99d5fb91edb73602b\": container with ID starting with 3fd9f00d5b98d03166392b7a483f9b20de65c9db713656d99d5fb91edb73602b not found: ID does not exist" containerID="3fd9f00d5b98d03166392b7a483f9b20de65c9db713656d99d5fb91edb73602b" Nov 28 10:31:22 crc kubenswrapper[4838]: I1128 10:31:22.122490 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3fd9f00d5b98d03166392b7a483f9b20de65c9db713656d99d5fb91edb73602b"} err="failed to get container status \"3fd9f00d5b98d03166392b7a483f9b20de65c9db713656d99d5fb91edb73602b\": rpc error: code = NotFound desc = could not find container \"3fd9f00d5b98d03166392b7a483f9b20de65c9db713656d99d5fb91edb73602b\": container with ID starting with 3fd9f00d5b98d03166392b7a483f9b20de65c9db713656d99d5fb91edb73602b not found: ID does not exist" Nov 28 10:31:22 crc kubenswrapper[4838]: I1128 10:31:22.576424 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c753d7f-a113-45e3-a1a4-f2346a7a0278" path="/var/lib/kubelet/pods/4c753d7f-a113-45e3-a1a4-f2346a7a0278/volumes" Nov 28 10:31:39 crc kubenswrapper[4838]: I1128 10:31:39.687032 4838 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-6mmw4 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.63:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 10:31:39 crc kubenswrapper[4838]: I1128 10:31:39.688970 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-6mmw4" podUID="b0c9680e-7b0a-47a9-87dc-4da8cfbfce77" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.63:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 10:31:39 crc kubenswrapper[4838]: I1128 
10:31:39.735946 4838 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-6mmw4 container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.63:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 10:31:39 crc kubenswrapper[4838]: I1128 10:31:39.736307 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-6mmw4" podUID="b0c9680e-7b0a-47a9-87dc-4da8cfbfce77" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.63:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 10:33:20 crc kubenswrapper[4838]: E1128 10:33:20.551094 4838 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.65:32934->38.102.83.65:35709: write tcp 38.102.83.65:32934->38.102.83.65:35709: write: broken pipe Nov 28 10:33:23 crc kubenswrapper[4838]: I1128 10:33:23.940174 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:33:23 crc kubenswrapper[4838]: I1128 10:33:23.940524 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:33:29 crc kubenswrapper[4838]: I1128 10:33:29.290993 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rpltz"] Nov 28 10:33:29 crc kubenswrapper[4838]: E1128 10:33:29.292137 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c753d7f-a113-45e3-a1a4-f2346a7a0278" containerName="registry-server" Nov 28 10:33:29 crc kubenswrapper[4838]: I1128 10:33:29.292156 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c753d7f-a113-45e3-a1a4-f2346a7a0278" containerName="registry-server" Nov 28 10:33:29 crc kubenswrapper[4838]: E1128 10:33:29.292661 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c753d7f-a113-45e3-a1a4-f2346a7a0278" containerName="extract-content" Nov 28 10:33:29 crc kubenswrapper[4838]: I1128 10:33:29.292674 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c753d7f-a113-45e3-a1a4-f2346a7a0278" containerName="extract-content" Nov 28 10:33:29 crc kubenswrapper[4838]: E1128 10:33:29.292819 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c753d7f-a113-45e3-a1a4-f2346a7a0278" containerName="extract-utilities" Nov 28 10:33:29 crc kubenswrapper[4838]: I1128 10:33:29.292832 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c753d7f-a113-45e3-a1a4-f2346a7a0278" containerName="extract-utilities" Nov 28 10:33:29 crc kubenswrapper[4838]: I1128 10:33:29.293052 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c753d7f-a113-45e3-a1a4-f2346a7a0278" containerName="registry-server" Nov 28 10:33:29 crc kubenswrapper[4838]: I1128 10:33:29.294832 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rpltz" Nov 28 10:33:29 crc kubenswrapper[4838]: I1128 10:33:29.346410 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rpltz"] Nov 28 10:33:29 crc kubenswrapper[4838]: I1128 10:33:29.477403 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fd2419c-4e14-4687-b9a7-43626faa3e55-utilities\") pod \"redhat-marketplace-rpltz\" (UID: \"7fd2419c-4e14-4687-b9a7-43626faa3e55\") " pod="openshift-marketplace/redhat-marketplace-rpltz" Nov 28 10:33:29 crc kubenswrapper[4838]: I1128 10:33:29.477474 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fd2419c-4e14-4687-b9a7-43626faa3e55-catalog-content\") pod \"redhat-marketplace-rpltz\" (UID: \"7fd2419c-4e14-4687-b9a7-43626faa3e55\") " pod="openshift-marketplace/redhat-marketplace-rpltz" Nov 28 10:33:29 crc kubenswrapper[4838]: I1128 10:33:29.478410 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gspqs\" (UniqueName: \"kubernetes.io/projected/7fd2419c-4e14-4687-b9a7-43626faa3e55-kube-api-access-gspqs\") pod \"redhat-marketplace-rpltz\" (UID: \"7fd2419c-4e14-4687-b9a7-43626faa3e55\") " pod="openshift-marketplace/redhat-marketplace-rpltz" Nov 28 10:33:29 crc kubenswrapper[4838]: I1128 10:33:29.580924 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gspqs\" (UniqueName: \"kubernetes.io/projected/7fd2419c-4e14-4687-b9a7-43626faa3e55-kube-api-access-gspqs\") pod \"redhat-marketplace-rpltz\" (UID: \"7fd2419c-4e14-4687-b9a7-43626faa3e55\") " pod="openshift-marketplace/redhat-marketplace-rpltz" Nov 28 10:33:29 crc kubenswrapper[4838]: I1128 10:33:29.581314 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fd2419c-4e14-4687-b9a7-43626faa3e55-utilities\") pod \"redhat-marketplace-rpltz\" (UID: \"7fd2419c-4e14-4687-b9a7-43626faa3e55\") " pod="openshift-marketplace/redhat-marketplace-rpltz" Nov 28 10:33:29 crc kubenswrapper[4838]: I1128 10:33:29.581376 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fd2419c-4e14-4687-b9a7-43626faa3e55-catalog-content\") pod \"redhat-marketplace-rpltz\" (UID: \"7fd2419c-4e14-4687-b9a7-43626faa3e55\") " pod="openshift-marketplace/redhat-marketplace-rpltz" Nov 28 10:33:29 crc kubenswrapper[4838]: I1128 10:33:29.581902 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fd2419c-4e14-4687-b9a7-43626faa3e55-utilities\") pod \"redhat-marketplace-rpltz\" (UID: \"7fd2419c-4e14-4687-b9a7-43626faa3e55\") " pod="openshift-marketplace/redhat-marketplace-rpltz" Nov 28 10:33:29 crc kubenswrapper[4838]: I1128 10:33:29.582145 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fd2419c-4e14-4687-b9a7-43626faa3e55-catalog-content\") pod \"redhat-marketplace-rpltz\" (UID: \"7fd2419c-4e14-4687-b9a7-43626faa3e55\") " pod="openshift-marketplace/redhat-marketplace-rpltz" Nov 28 10:33:29 crc kubenswrapper[4838]: I1128 10:33:29.605170 4838 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-gspqs\" (UniqueName: \"kubernetes.io/projected/7fd2419c-4e14-4687-b9a7-43626faa3e55-kube-api-access-gspqs\") pod \"redhat-marketplace-rpltz\" (UID: \"7fd2419c-4e14-4687-b9a7-43626faa3e55\") " pod="openshift-marketplace/redhat-marketplace-rpltz" Nov 28 10:33:29 crc kubenswrapper[4838]: I1128 10:33:29.629452 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rpltz" Nov 28 10:33:30 crc kubenswrapper[4838]: I1128 10:33:30.197871 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rpltz"] Nov 28 10:33:30 crc kubenswrapper[4838]: I1128 10:33:30.419904 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rpltz" event={"ID":"7fd2419c-4e14-4687-b9a7-43626faa3e55","Type":"ContainerStarted","Data":"70c277f8f1cdcff500c0a2060b374b372edcaeb8ff6561e1ecfca92f63522aaa"} Nov 28 10:33:31 crc kubenswrapper[4838]: I1128 10:33:31.434323 4838 generic.go:334] "Generic (PLEG): container finished" podID="7fd2419c-4e14-4687-b9a7-43626faa3e55" containerID="fb58cdc342202ee89ceab0ed28511ce12c527ea976d71bfd8c554c477fddafbc" exitCode=0 Nov 28 10:33:31 crc kubenswrapper[4838]: I1128 10:33:31.434436 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rpltz" event={"ID":"7fd2419c-4e14-4687-b9a7-43626faa3e55","Type":"ContainerDied","Data":"fb58cdc342202ee89ceab0ed28511ce12c527ea976d71bfd8c554c477fddafbc"} Nov 28 10:33:32 crc kubenswrapper[4838]: I1128 10:33:32.445030 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rpltz" event={"ID":"7fd2419c-4e14-4687-b9a7-43626faa3e55","Type":"ContainerStarted","Data":"5ebb833816784d1f0206c683d97c52d53b3801866874c83a2828d99faae4a6ec"} Nov 28 10:33:33 crc kubenswrapper[4838]: I1128 10:33:33.455506 4838 generic.go:334] "Generic (PLEG): container finished" podID="7fd2419c-4e14-4687-b9a7-43626faa3e55" containerID="5ebb833816784d1f0206c683d97c52d53b3801866874c83a2828d99faae4a6ec" exitCode=0 Nov 28 10:33:33 crc kubenswrapper[4838]: I1128 10:33:33.455595 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rpltz" event={"ID":"7fd2419c-4e14-4687-b9a7-43626faa3e55","Type":"ContainerDied","Data":"5ebb833816784d1f0206c683d97c52d53b3801866874c83a2828d99faae4a6ec"} Nov 28 10:33:35 crc kubenswrapper[4838]: I1128 10:33:35.482891 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rpltz" event={"ID":"7fd2419c-4e14-4687-b9a7-43626faa3e55","Type":"ContainerStarted","Data":"4ea37d773abc1d98ce1fe502153d1f95f82f3a818e8efc6f8de9fd0fa0f40919"} Nov 28 10:33:35 crc kubenswrapper[4838]: I1128 10:33:35.511757 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rpltz" podStartSLOduration=3.555116915 podStartE2EDuration="6.511693821s" podCreationTimestamp="2025-11-28 10:33:29 +0000 UTC" firstStartedPulling="2025-11-28 10:33:31.437175797 +0000 UTC m=+2183.136149977" lastFinishedPulling="2025-11-28 10:33:34.393752683 +0000 UTC m=+2186.092726883" observedRunningTime="2025-11-28 10:33:35.501293289 +0000 UTC m=+2187.200267489" watchObservedRunningTime="2025-11-28 10:33:35.511693821 +0000 UTC m=+2187.210668031" Nov 28 10:33:38 crc kubenswrapper[4838]: I1128 10:33:38.671942 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg"] Nov 28 10:33:38 crc kubenswrapper[4838]: I1128 10:33:38.688174 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt"] Nov 28 10:33:38 crc kubenswrapper[4838]: I1128 10:33:38.694435 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7"] Nov 28 10:33:38 crc kubenswrapper[4838]: I1128 10:33:38.700406 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-mf8cg"] Nov 28 10:33:38 crc kubenswrapper[4838]: I1128 10:33:38.705800 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-ghldt"] Nov 28 10:33:38 crc kubenswrapper[4838]: I1128 10:33:38.711120 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-gg2f7"] Nov 28 10:33:38 crc kubenswrapper[4838]: I1128 10:33:38.716923 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp"] Nov 28 10:33:38 crc kubenswrapper[4838]: I1128 10:33:38.724986 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz"] Nov 28 10:33:38 crc kubenswrapper[4838]: I1128 10:33:38.734737 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj"] Nov 28 10:33:38 crc kubenswrapper[4838]: I1128 10:33:38.741075 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs"] Nov 28 10:33:38 crc kubenswrapper[4838]: I1128 10:33:38.748776 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-58bxs"] Nov 28 10:33:38 crc kubenswrapper[4838]: I1128 10:33:38.753806 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-52cwj"] Nov 28 10:33:38 crc kubenswrapper[4838]: I1128 10:33:38.761565 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-c2xjz"] Nov 28 10:33:38 crc kubenswrapper[4838]: I1128 10:33:38.768603 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-bjdcp"] Nov 28 10:33:38 crc kubenswrapper[4838]: I1128 10:33:38.775609 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-8476r"] Nov 28 10:33:38 crc kubenswrapper[4838]: I1128 10:33:38.782553 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj"] Nov 28 10:33:38 crc kubenswrapper[4838]: I1128 10:33:38.788248 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n"] Nov 28 10:33:38 crc kubenswrapper[4838]: I1128 10:33:38.793467 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5skrj"] Nov 28 10:33:38 crc kubenswrapper[4838]: I1128 10:33:38.798614 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-8476r"] Nov 28 10:33:38 crc kubenswrapper[4838]: I1128 10:33:38.803963 4838 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lgq9n"] Nov 28 10:33:39 crc kubenswrapper[4838]: I1128 10:33:39.630449 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rpltz" Nov 28 10:33:39 crc kubenswrapper[4838]: I1128 10:33:39.630514 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rpltz" Nov 28 10:33:39 crc kubenswrapper[4838]: I1128 10:33:39.703349 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rpltz" Nov 28 10:33:40 crc kubenswrapper[4838]: I1128 10:33:40.584864 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06110a37-1e85-48b3-9e24-aad19c7b062e" path="/var/lib/kubelet/pods/06110a37-1e85-48b3-9e24-aad19c7b062e/volumes" Nov 28 10:33:40 crc kubenswrapper[4838]: I1128 10:33:40.587004 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5" path="/var/lib/kubelet/pods/0dc66645-e2fb-4fa3-ae22-be67c8bf6eb5/volumes" Nov 28 10:33:40 crc kubenswrapper[4838]: I1128 10:33:40.588417 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0" path="/var/lib/kubelet/pods/4c8f2bc3-9c2d-43c2-99bb-3d7cb73233e0/volumes" Nov 28 10:33:40 crc kubenswrapper[4838]: I1128 10:33:40.589802 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64d0c9f2-be2c-41e0-b740-25b053504b1b" path="/var/lib/kubelet/pods/64d0c9f2-be2c-41e0-b740-25b053504b1b/volumes" Nov 28 10:33:40 crc kubenswrapper[4838]: I1128 10:33:40.592605 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6da99ea6-330f-43fa-8586-fbfed3a9a4d1" path="/var/lib/kubelet/pods/6da99ea6-330f-43fa-8586-fbfed3a9a4d1/volumes" Nov 28 10:33:40 crc kubenswrapper[4838]: I1128 10:33:40.594767 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86cb5779-8a22-4602-8bda-e9f2fb2cb78d" path="/var/lib/kubelet/pods/86cb5779-8a22-4602-8bda-e9f2fb2cb78d/volumes" Nov 28 10:33:40 crc kubenswrapper[4838]: I1128 10:33:40.595874 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8dfef46a-ee03-4662-b64d-c906a0c8759e" path="/var/lib/kubelet/pods/8dfef46a-ee03-4662-b64d-c906a0c8759e/volumes" Nov 28 10:33:40 crc kubenswrapper[4838]: I1128 10:33:40.597549 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b2a3bd0-add6-473d-b3fe-3f0e1f211230" path="/var/lib/kubelet/pods/9b2a3bd0-add6-473d-b3fe-3f0e1f211230/volumes" Nov 28 10:33:40 crc kubenswrapper[4838]: I1128 10:33:40.599049 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e6f900b-6658-4957-a107-a89a5e77fefa" path="/var/lib/kubelet/pods/9e6f900b-6658-4957-a107-a89a5e77fefa/volumes" Nov 28 10:33:40 crc kubenswrapper[4838]: I1128 10:33:40.600175 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8600285-a3ab-424a-8b28-94560f292e43" path="/var/lib/kubelet/pods/e8600285-a3ab-424a-8b28-94560f292e43/volumes" Nov 28 10:33:40 crc kubenswrapper[4838]: I1128 10:33:40.607279 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rpltz" Nov 28 10:33:40 crc kubenswrapper[4838]: I1128 10:33:40.675167 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rpltz"] Nov 28 10:33:42 crc 
kubenswrapper[4838]: I1128 10:33:42.551230 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rpltz" podUID="7fd2419c-4e14-4687-b9a7-43626faa3e55" containerName="registry-server" containerID="cri-o://4ea37d773abc1d98ce1fe502153d1f95f82f3a818e8efc6f8de9fd0fa0f40919" gracePeriod=2 Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.117947 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rpltz" Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.255329 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fd2419c-4e14-4687-b9a7-43626faa3e55-utilities\") pod \"7fd2419c-4e14-4687-b9a7-43626faa3e55\" (UID: \"7fd2419c-4e14-4687-b9a7-43626faa3e55\") " Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.255479 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fd2419c-4e14-4687-b9a7-43626faa3e55-catalog-content\") pod \"7fd2419c-4e14-4687-b9a7-43626faa3e55\" (UID: \"7fd2419c-4e14-4687-b9a7-43626faa3e55\") " Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.255553 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gspqs\" (UniqueName: \"kubernetes.io/projected/7fd2419c-4e14-4687-b9a7-43626faa3e55-kube-api-access-gspqs\") pod \"7fd2419c-4e14-4687-b9a7-43626faa3e55\" (UID: \"7fd2419c-4e14-4687-b9a7-43626faa3e55\") " Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.256351 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fd2419c-4e14-4687-b9a7-43626faa3e55-utilities" (OuterVolumeSpecName: "utilities") pod "7fd2419c-4e14-4687-b9a7-43626faa3e55" (UID: "7fd2419c-4e14-4687-b9a7-43626faa3e55"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.263109 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fd2419c-4e14-4687-b9a7-43626faa3e55-kube-api-access-gspqs" (OuterVolumeSpecName: "kube-api-access-gspqs") pod "7fd2419c-4e14-4687-b9a7-43626faa3e55" (UID: "7fd2419c-4e14-4687-b9a7-43626faa3e55"). InnerVolumeSpecName "kube-api-access-gspqs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.277628 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fd2419c-4e14-4687-b9a7-43626faa3e55-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7fd2419c-4e14-4687-b9a7-43626faa3e55" (UID: "7fd2419c-4e14-4687-b9a7-43626faa3e55"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.358633 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fd2419c-4e14-4687-b9a7-43626faa3e55-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.358665 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fd2419c-4e14-4687-b9a7-43626faa3e55-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.358678 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gspqs\" (UniqueName: \"kubernetes.io/projected/7fd2419c-4e14-4687-b9a7-43626faa3e55-kube-api-access-gspqs\") on node \"crc\" DevicePath \"\"" Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.578547 4838 generic.go:334] "Generic (PLEG): container finished" podID="7fd2419c-4e14-4687-b9a7-43626faa3e55" containerID="4ea37d773abc1d98ce1fe502153d1f95f82f3a818e8efc6f8de9fd0fa0f40919" exitCode=0 Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.578622 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rpltz" event={"ID":"7fd2419c-4e14-4687-b9a7-43626faa3e55","Type":"ContainerDied","Data":"4ea37d773abc1d98ce1fe502153d1f95f82f3a818e8efc6f8de9fd0fa0f40919"} Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.578774 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rpltz" Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.579917 4838 scope.go:117] "RemoveContainer" containerID="4ea37d773abc1d98ce1fe502153d1f95f82f3a818e8efc6f8de9fd0fa0f40919" Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.579795 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rpltz" event={"ID":"7fd2419c-4e14-4687-b9a7-43626faa3e55","Type":"ContainerDied","Data":"70c277f8f1cdcff500c0a2060b374b372edcaeb8ff6561e1ecfca92f63522aaa"} Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.607971 4838 scope.go:117] "RemoveContainer" containerID="5ebb833816784d1f0206c683d97c52d53b3801866874c83a2828d99faae4a6ec" Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.628819 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rpltz"] Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.638854 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rpltz"] Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.648559 4838 scope.go:117] "RemoveContainer" containerID="fb58cdc342202ee89ceab0ed28511ce12c527ea976d71bfd8c554c477fddafbc" Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.699951 4838 scope.go:117] "RemoveContainer" containerID="4ea37d773abc1d98ce1fe502153d1f95f82f3a818e8efc6f8de9fd0fa0f40919" Nov 28 10:33:43 crc kubenswrapper[4838]: E1128 10:33:43.700478 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ea37d773abc1d98ce1fe502153d1f95f82f3a818e8efc6f8de9fd0fa0f40919\": container with ID starting with 4ea37d773abc1d98ce1fe502153d1f95f82f3a818e8efc6f8de9fd0fa0f40919 not found: ID does not exist" containerID="4ea37d773abc1d98ce1fe502153d1f95f82f3a818e8efc6f8de9fd0fa0f40919" Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.700581 4838 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ea37d773abc1d98ce1fe502153d1f95f82f3a818e8efc6f8de9fd0fa0f40919"} err="failed to get container status \"4ea37d773abc1d98ce1fe502153d1f95f82f3a818e8efc6f8de9fd0fa0f40919\": rpc error: code = NotFound desc = could not find container \"4ea37d773abc1d98ce1fe502153d1f95f82f3a818e8efc6f8de9fd0fa0f40919\": container with ID starting with 4ea37d773abc1d98ce1fe502153d1f95f82f3a818e8efc6f8de9fd0fa0f40919 not found: ID does not exist" Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.700678 4838 scope.go:117] "RemoveContainer" containerID="5ebb833816784d1f0206c683d97c52d53b3801866874c83a2828d99faae4a6ec" Nov 28 10:33:43 crc kubenswrapper[4838]: E1128 10:33:43.701261 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ebb833816784d1f0206c683d97c52d53b3801866874c83a2828d99faae4a6ec\": container with ID starting with 5ebb833816784d1f0206c683d97c52d53b3801866874c83a2828d99faae4a6ec not found: ID does not exist" containerID="5ebb833816784d1f0206c683d97c52d53b3801866874c83a2828d99faae4a6ec" Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.701315 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ebb833816784d1f0206c683d97c52d53b3801866874c83a2828d99faae4a6ec"} err="failed to get container status \"5ebb833816784d1f0206c683d97c52d53b3801866874c83a2828d99faae4a6ec\": rpc error: code = NotFound desc = could not find container \"5ebb833816784d1f0206c683d97c52d53b3801866874c83a2828d99faae4a6ec\": container with ID starting with 5ebb833816784d1f0206c683d97c52d53b3801866874c83a2828d99faae4a6ec not found: ID does not exist" Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.701346 4838 scope.go:117] "RemoveContainer" containerID="fb58cdc342202ee89ceab0ed28511ce12c527ea976d71bfd8c554c477fddafbc" Nov 28 10:33:43 crc kubenswrapper[4838]: E1128 10:33:43.701626 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb58cdc342202ee89ceab0ed28511ce12c527ea976d71bfd8c554c477fddafbc\": container with ID starting with fb58cdc342202ee89ceab0ed28511ce12c527ea976d71bfd8c554c477fddafbc not found: ID does not exist" containerID="fb58cdc342202ee89ceab0ed28511ce12c527ea976d71bfd8c554c477fddafbc" Nov 28 10:33:43 crc kubenswrapper[4838]: I1128 10:33:43.701656 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb58cdc342202ee89ceab0ed28511ce12c527ea976d71bfd8c554c477fddafbc"} err="failed to get container status \"fb58cdc342202ee89ceab0ed28511ce12c527ea976d71bfd8c554c477fddafbc\": rpc error: code = NotFound desc = could not find container \"fb58cdc342202ee89ceab0ed28511ce12c527ea976d71bfd8c554c477fddafbc\": container with ID starting with fb58cdc342202ee89ceab0ed28511ce12c527ea976d71bfd8c554c477fddafbc not found: ID does not exist" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.510703 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6"] Nov 28 10:33:44 crc kubenswrapper[4838]: E1128 10:33:44.511115 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fd2419c-4e14-4687-b9a7-43626faa3e55" containerName="extract-content" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.511130 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fd2419c-4e14-4687-b9a7-43626faa3e55" 
containerName="extract-content" Nov 28 10:33:44 crc kubenswrapper[4838]: E1128 10:33:44.511147 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fd2419c-4e14-4687-b9a7-43626faa3e55" containerName="registry-server" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.511154 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fd2419c-4e14-4687-b9a7-43626faa3e55" containerName="registry-server" Nov 28 10:33:44 crc kubenswrapper[4838]: E1128 10:33:44.511172 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fd2419c-4e14-4687-b9a7-43626faa3e55" containerName="extract-utilities" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.511180 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fd2419c-4e14-4687-b9a7-43626faa3e55" containerName="extract-utilities" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.511339 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fd2419c-4e14-4687-b9a7-43626faa3e55" containerName="registry-server" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.512029 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.514520 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.514708 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.514917 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.515133 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.515274 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.521549 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6"] Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.574512 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7fd2419c-4e14-4687-b9a7-43626faa3e55" path="/var/lib/kubelet/pods/7fd2419c-4e14-4687-b9a7-43626faa3e55/volumes" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.588193 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6\" (UID: \"277c54af-c091-426d-bf0d-523eca9b41fb\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.588268 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6\" (UID: \"277c54af-c091-426d-bf0d-523eca9b41fb\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.588300 4838 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hb4z9\" (UniqueName: \"kubernetes.io/projected/277c54af-c091-426d-bf0d-523eca9b41fb-kube-api-access-hb4z9\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6\" (UID: \"277c54af-c091-426d-bf0d-523eca9b41fb\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.588457 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6\" (UID: \"277c54af-c091-426d-bf0d-523eca9b41fb\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.588492 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6\" (UID: \"277c54af-c091-426d-bf0d-523eca9b41fb\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.690921 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6\" (UID: \"277c54af-c091-426d-bf0d-523eca9b41fb\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.691047 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6\" (UID: \"277c54af-c091-426d-bf0d-523eca9b41fb\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.691097 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hb4z9\" (UniqueName: \"kubernetes.io/projected/277c54af-c091-426d-bf0d-523eca9b41fb-kube-api-access-hb4z9\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6\" (UID: \"277c54af-c091-426d-bf0d-523eca9b41fb\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.691230 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6\" (UID: \"277c54af-c091-426d-bf0d-523eca9b41fb\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.691286 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6\" (UID: \"277c54af-c091-426d-bf0d-523eca9b41fb\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.697599 4838 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6\" (UID: \"277c54af-c091-426d-bf0d-523eca9b41fb\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.698474 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6\" (UID: \"277c54af-c091-426d-bf0d-523eca9b41fb\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.700095 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6\" (UID: \"277c54af-c091-426d-bf0d-523eca9b41fb\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.704502 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6\" (UID: \"277c54af-c091-426d-bf0d-523eca9b41fb\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.707059 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hb4z9\" (UniqueName: \"kubernetes.io/projected/277c54af-c091-426d-bf0d-523eca9b41fb-kube-api-access-hb4z9\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6\" (UID: \"277c54af-c091-426d-bf0d-523eca9b41fb\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" Nov 28 10:33:44 crc kubenswrapper[4838]: I1128 10:33:44.865780 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" Nov 28 10:33:45 crc kubenswrapper[4838]: I1128 10:33:45.449270 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6"] Nov 28 10:33:45 crc kubenswrapper[4838]: I1128 10:33:45.599559 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" event={"ID":"277c54af-c091-426d-bf0d-523eca9b41fb","Type":"ContainerStarted","Data":"7f6e8cdcec99f7721ceaedec8582cc7eb307e69d06bcbfaf69f247ec433f845d"} Nov 28 10:33:46 crc kubenswrapper[4838]: I1128 10:33:46.609534 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" event={"ID":"277c54af-c091-426d-bf0d-523eca9b41fb","Type":"ContainerStarted","Data":"9c1ca2b6a332acc94af8a9eb89cde7386058d9e7795dd898885faa411d1ef710"} Nov 28 10:33:46 crc kubenswrapper[4838]: I1128 10:33:46.631707 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" podStartSLOduration=2.000489111 podStartE2EDuration="2.631682779s" podCreationTimestamp="2025-11-28 10:33:44 +0000 UTC" firstStartedPulling="2025-11-28 10:33:45.462714473 +0000 UTC m=+2197.161688683" lastFinishedPulling="2025-11-28 10:33:46.093908171 +0000 UTC m=+2197.792882351" observedRunningTime="2025-11-28 10:33:46.62621437 +0000 UTC m=+2198.325188550" watchObservedRunningTime="2025-11-28 10:33:46.631682779 +0000 UTC m=+2198.330656959" Nov 28 10:33:53 crc kubenswrapper[4838]: I1128 10:33:53.940379 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:33:53 crc kubenswrapper[4838]: I1128 10:33:53.941058 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:33:58 crc kubenswrapper[4838]: I1128 10:33:58.765784 4838 generic.go:334] "Generic (PLEG): container finished" podID="277c54af-c091-426d-bf0d-523eca9b41fb" containerID="9c1ca2b6a332acc94af8a9eb89cde7386058d9e7795dd898885faa411d1ef710" exitCode=0 Nov 28 10:33:58 crc kubenswrapper[4838]: I1128 10:33:58.765932 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" event={"ID":"277c54af-c091-426d-bf0d-523eca9b41fb","Type":"ContainerDied","Data":"9c1ca2b6a332acc94af8a9eb89cde7386058d9e7795dd898885faa411d1ef710"} Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.195841 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.312980 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-ceph\") pod \"277c54af-c091-426d-bf0d-523eca9b41fb\" (UID: \"277c54af-c091-426d-bf0d-523eca9b41fb\") " Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.313101 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-inventory\") pod \"277c54af-c091-426d-bf0d-523eca9b41fb\" (UID: \"277c54af-c091-426d-bf0d-523eca9b41fb\") " Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.313220 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-repo-setup-combined-ca-bundle\") pod \"277c54af-c091-426d-bf0d-523eca9b41fb\" (UID: \"277c54af-c091-426d-bf0d-523eca9b41fb\") " Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.313264 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hb4z9\" (UniqueName: \"kubernetes.io/projected/277c54af-c091-426d-bf0d-523eca9b41fb-kube-api-access-hb4z9\") pod \"277c54af-c091-426d-bf0d-523eca9b41fb\" (UID: \"277c54af-c091-426d-bf0d-523eca9b41fb\") " Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.313284 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-ssh-key\") pod \"277c54af-c091-426d-bf0d-523eca9b41fb\" (UID: \"277c54af-c091-426d-bf0d-523eca9b41fb\") " Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.320769 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "277c54af-c091-426d-bf0d-523eca9b41fb" (UID: "277c54af-c091-426d-bf0d-523eca9b41fb"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.320832 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/277c54af-c091-426d-bf0d-523eca9b41fb-kube-api-access-hb4z9" (OuterVolumeSpecName: "kube-api-access-hb4z9") pod "277c54af-c091-426d-bf0d-523eca9b41fb" (UID: "277c54af-c091-426d-bf0d-523eca9b41fb"). InnerVolumeSpecName "kube-api-access-hb4z9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.322138 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-ceph" (OuterVolumeSpecName: "ceph") pod "277c54af-c091-426d-bf0d-523eca9b41fb" (UID: "277c54af-c091-426d-bf0d-523eca9b41fb"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.348500 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "277c54af-c091-426d-bf0d-523eca9b41fb" (UID: "277c54af-c091-426d-bf0d-523eca9b41fb"). 
InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.357956 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-inventory" (OuterVolumeSpecName: "inventory") pod "277c54af-c091-426d-bf0d-523eca9b41fb" (UID: "277c54af-c091-426d-bf0d-523eca9b41fb"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.414998 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.415041 4838 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.415063 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hb4z9\" (UniqueName: \"kubernetes.io/projected/277c54af-c091-426d-bf0d-523eca9b41fb-kube-api-access-hb4z9\") on node \"crc\" DevicePath \"\"" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.415082 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.415099 4838 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/277c54af-c091-426d-bf0d-523eca9b41fb-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.794534 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" event={"ID":"277c54af-c091-426d-bf0d-523eca9b41fb","Type":"ContainerDied","Data":"7f6e8cdcec99f7721ceaedec8582cc7eb307e69d06bcbfaf69f247ec433f845d"} Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.794597 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7f6e8cdcec99f7721ceaedec8582cc7eb307e69d06bcbfaf69f247ec433f845d" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.794683 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.906667 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr"] Nov 28 10:34:00 crc kubenswrapper[4838]: E1128 10:34:00.907367 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="277c54af-c091-426d-bf0d-523eca9b41fb" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.907383 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="277c54af-c091-426d-bf0d-523eca9b41fb" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.907598 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="277c54af-c091-426d-bf0d-523eca9b41fb" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.909481 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.912133 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.912647 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.912768 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.914161 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.922891 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr"] Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.948930 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z445n\" (UniqueName: \"kubernetes.io/projected/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-kube-api-access-z445n\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr\" (UID: \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.949011 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr\" (UID: \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.949112 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr\" (UID: \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.949150 4838 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr\" (UID: \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.949192 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr\" (UID: \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" Nov 28 10:34:00 crc kubenswrapper[4838]: I1128 10:34:00.954834 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 10:34:01 crc kubenswrapper[4838]: I1128 10:34:01.049922 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr\" (UID: \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" Nov 28 10:34:01 crc kubenswrapper[4838]: I1128 10:34:01.049983 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr\" (UID: \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" Nov 28 10:34:01 crc kubenswrapper[4838]: I1128 10:34:01.050030 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr\" (UID: \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" Nov 28 10:34:01 crc kubenswrapper[4838]: I1128 10:34:01.050065 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z445n\" (UniqueName: \"kubernetes.io/projected/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-kube-api-access-z445n\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr\" (UID: \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" Nov 28 10:34:01 crc kubenswrapper[4838]: I1128 10:34:01.050131 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr\" (UID: \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" Nov 28 10:34:01 crc kubenswrapper[4838]: I1128 10:34:01.058578 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr\" (UID: \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" Nov 28 10:34:01 crc 
kubenswrapper[4838]: I1128 10:34:01.058740 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr\" (UID: \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" Nov 28 10:34:01 crc kubenswrapper[4838]: I1128 10:34:01.059319 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr\" (UID: \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" Nov 28 10:34:01 crc kubenswrapper[4838]: I1128 10:34:01.065079 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr\" (UID: \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" Nov 28 10:34:01 crc kubenswrapper[4838]: I1128 10:34:01.068470 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z445n\" (UniqueName: \"kubernetes.io/projected/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-kube-api-access-z445n\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr\" (UID: \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" Nov 28 10:34:01 crc kubenswrapper[4838]: I1128 10:34:01.265016 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" Nov 28 10:34:01 crc kubenswrapper[4838]: I1128 10:34:01.688309 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr"] Nov 28 10:34:01 crc kubenswrapper[4838]: W1128 10:34:01.692354 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod226a6e3a_8fcf_4284_b8a5_3f4055ae9838.slice/crio-553e27d3d3630a1ac6088f2e79b97d6cb83c54dbc47ba11f1b46460cd01a478a WatchSource:0}: Error finding container 553e27d3d3630a1ac6088f2e79b97d6cb83c54dbc47ba11f1b46460cd01a478a: Status 404 returned error can't find the container with id 553e27d3d3630a1ac6088f2e79b97d6cb83c54dbc47ba11f1b46460cd01a478a Nov 28 10:34:01 crc kubenswrapper[4838]: I1128 10:34:01.805161 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" event={"ID":"226a6e3a-8fcf-4284-b8a5-3f4055ae9838","Type":"ContainerStarted","Data":"553e27d3d3630a1ac6088f2e79b97d6cb83c54dbc47ba11f1b46460cd01a478a"} Nov 28 10:34:02 crc kubenswrapper[4838]: I1128 10:34:02.816804 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" event={"ID":"226a6e3a-8fcf-4284-b8a5-3f4055ae9838","Type":"ContainerStarted","Data":"d66acadd1f348ae6c41ed8c89abfb325dbe04f0a43e36c0dc6ec7e5a5583afd4"} Nov 28 10:34:02 crc kubenswrapper[4838]: I1128 10:34:02.839065 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" podStartSLOduration=2.397135392 podStartE2EDuration="2.839049851s" podCreationTimestamp="2025-11-28 10:34:00 +0000 UTC" firstStartedPulling="2025-11-28 10:34:01.694609699 +0000 UTC m=+2213.393583869" lastFinishedPulling="2025-11-28 10:34:02.136524118 +0000 UTC m=+2213.835498328" observedRunningTime="2025-11-28 10:34:02.834384026 +0000 UTC m=+2214.533358206" watchObservedRunningTime="2025-11-28 10:34:02.839049851 +0000 UTC m=+2214.538024021" Nov 28 10:34:17 crc kubenswrapper[4838]: I1128 10:34:17.443897 4838 scope.go:117] "RemoveContainer" containerID="a0dcb66973648868beeca41e5062a4dd24f77347bb64641cbf7e023c1ce65737" Nov 28 10:34:17 crc kubenswrapper[4838]: I1128 10:34:17.501649 4838 scope.go:117] "RemoveContainer" containerID="950708e299668559966871d4c19b3b71c412706cae58d0e7f8b312cff60bb2b8" Nov 28 10:34:17 crc kubenswrapper[4838]: I1128 10:34:17.593618 4838 scope.go:117] "RemoveContainer" containerID="5b2baf7ce903c5ccbd42e05cc2c897c16f19a24844e1be9946e346db898df003" Nov 28 10:34:17 crc kubenswrapper[4838]: I1128 10:34:17.661529 4838 scope.go:117] "RemoveContainer" containerID="d17fdebf7ddf7bf9d073a969b6fa6c85acd4d54a509d6cb4c67f1dad2fec6a0a" Nov 28 10:34:17 crc kubenswrapper[4838]: I1128 10:34:17.732338 4838 scope.go:117] "RemoveContainer" containerID="d323e0c824079bddcc7b03bd7d94d47d1c10284ddf75ea050f3d02d6544b7146" Nov 28 10:34:17 crc kubenswrapper[4838]: I1128 10:34:17.763939 4838 scope.go:117] "RemoveContainer" containerID="edc61b47789223a89855bbc90c1306cf10270325b7fe22400441260338ae823a" Nov 28 10:34:17 crc kubenswrapper[4838]: I1128 10:34:17.830138 4838 scope.go:117] "RemoveContainer" containerID="38de45b3d49d118d7707573f1ce8256730514f7c25ff22bd51c5b87eb6e1f73e" Nov 28 10:34:17 crc kubenswrapper[4838]: I1128 10:34:17.869639 4838 scope.go:117] "RemoveContainer" 
containerID="2c902d090bc4e211d8794bdd4817846666a7efbafc414323168640c86902b25a" Nov 28 10:34:23 crc kubenswrapper[4838]: I1128 10:34:23.939935 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:34:23 crc kubenswrapper[4838]: I1128 10:34:23.940611 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:34:23 crc kubenswrapper[4838]: I1128 10:34:23.940675 4838 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 10:34:23 crc kubenswrapper[4838]: I1128 10:34:23.945741 4838 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5afd39ddd492746c0199d772a63313b45e117b315a570579a80048ab6e189d3f"} pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 10:34:23 crc kubenswrapper[4838]: I1128 10:34:23.945952 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" containerID="cri-o://5afd39ddd492746c0199d772a63313b45e117b315a570579a80048ab6e189d3f" gracePeriod=600 Nov 28 10:34:25 crc kubenswrapper[4838]: I1128 10:34:25.091712 4838 generic.go:334] "Generic (PLEG): container finished" podID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerID="5afd39ddd492746c0199d772a63313b45e117b315a570579a80048ab6e189d3f" exitCode=0 Nov 28 10:34:25 crc kubenswrapper[4838]: I1128 10:34:25.091860 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerDied","Data":"5afd39ddd492746c0199d772a63313b45e117b315a570579a80048ab6e189d3f"} Nov 28 10:34:25 crc kubenswrapper[4838]: I1128 10:34:25.092580 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerStarted","Data":"eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7"} Nov 28 10:34:25 crc kubenswrapper[4838]: I1128 10:34:25.092620 4838 scope.go:117] "RemoveContainer" containerID="94f5e3e0d388a990bfe9cc3186f4655bc8ffbe5eaaa4558781704f164737d7ab" Nov 28 10:34:51 crc kubenswrapper[4838]: I1128 10:34:51.257268 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wkvfh"] Nov 28 10:34:51 crc kubenswrapper[4838]: I1128 10:34:51.259741 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wkvfh" Nov 28 10:34:51 crc kubenswrapper[4838]: I1128 10:34:51.283873 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wkvfh"] Nov 28 10:34:51 crc kubenswrapper[4838]: I1128 10:34:51.410455 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qcqg\" (UniqueName: \"kubernetes.io/projected/3adc8830-5957-482e-8f05-b921505ee8db-kube-api-access-8qcqg\") pod \"community-operators-wkvfh\" (UID: \"3adc8830-5957-482e-8f05-b921505ee8db\") " pod="openshift-marketplace/community-operators-wkvfh" Nov 28 10:34:51 crc kubenswrapper[4838]: I1128 10:34:51.410523 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3adc8830-5957-482e-8f05-b921505ee8db-utilities\") pod \"community-operators-wkvfh\" (UID: \"3adc8830-5957-482e-8f05-b921505ee8db\") " pod="openshift-marketplace/community-operators-wkvfh" Nov 28 10:34:51 crc kubenswrapper[4838]: I1128 10:34:51.410789 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3adc8830-5957-482e-8f05-b921505ee8db-catalog-content\") pod \"community-operators-wkvfh\" (UID: \"3adc8830-5957-482e-8f05-b921505ee8db\") " pod="openshift-marketplace/community-operators-wkvfh" Nov 28 10:34:51 crc kubenswrapper[4838]: I1128 10:34:51.513077 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3adc8830-5957-482e-8f05-b921505ee8db-catalog-content\") pod \"community-operators-wkvfh\" (UID: \"3adc8830-5957-482e-8f05-b921505ee8db\") " pod="openshift-marketplace/community-operators-wkvfh" Nov 28 10:34:51 crc kubenswrapper[4838]: I1128 10:34:51.513170 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qcqg\" (UniqueName: \"kubernetes.io/projected/3adc8830-5957-482e-8f05-b921505ee8db-kube-api-access-8qcqg\") pod \"community-operators-wkvfh\" (UID: \"3adc8830-5957-482e-8f05-b921505ee8db\") " pod="openshift-marketplace/community-operators-wkvfh" Nov 28 10:34:51 crc kubenswrapper[4838]: I1128 10:34:51.513200 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3adc8830-5957-482e-8f05-b921505ee8db-utilities\") pod \"community-operators-wkvfh\" (UID: \"3adc8830-5957-482e-8f05-b921505ee8db\") " pod="openshift-marketplace/community-operators-wkvfh" Nov 28 10:34:51 crc kubenswrapper[4838]: I1128 10:34:51.513729 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3adc8830-5957-482e-8f05-b921505ee8db-utilities\") pod \"community-operators-wkvfh\" (UID: \"3adc8830-5957-482e-8f05-b921505ee8db\") " pod="openshift-marketplace/community-operators-wkvfh" Nov 28 10:34:51 crc kubenswrapper[4838]: I1128 10:34:51.514125 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3adc8830-5957-482e-8f05-b921505ee8db-catalog-content\") pod \"community-operators-wkvfh\" (UID: \"3adc8830-5957-482e-8f05-b921505ee8db\") " pod="openshift-marketplace/community-operators-wkvfh" Nov 28 10:34:51 crc kubenswrapper[4838]: I1128 10:34:51.533628 4838 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-8qcqg\" (UniqueName: \"kubernetes.io/projected/3adc8830-5957-482e-8f05-b921505ee8db-kube-api-access-8qcqg\") pod \"community-operators-wkvfh\" (UID: \"3adc8830-5957-482e-8f05-b921505ee8db\") " pod="openshift-marketplace/community-operators-wkvfh" Nov 28 10:34:51 crc kubenswrapper[4838]: I1128 10:34:51.595972 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wkvfh" Nov 28 10:34:52 crc kubenswrapper[4838]: I1128 10:34:52.089584 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wkvfh"] Nov 28 10:34:52 crc kubenswrapper[4838]: W1128 10:34:52.095910 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3adc8830_5957_482e_8f05_b921505ee8db.slice/crio-e7ff6c05343fc2f92ac7b2bed20f172a81fb332c2f345e1938a55eb568eda5d7 WatchSource:0}: Error finding container e7ff6c05343fc2f92ac7b2bed20f172a81fb332c2f345e1938a55eb568eda5d7: Status 404 returned error can't find the container with id e7ff6c05343fc2f92ac7b2bed20f172a81fb332c2f345e1938a55eb568eda5d7 Nov 28 10:34:52 crc kubenswrapper[4838]: I1128 10:34:52.407339 4838 generic.go:334] "Generic (PLEG): container finished" podID="3adc8830-5957-482e-8f05-b921505ee8db" containerID="ef9394d1e1d4b0362c0d6fbefaa5e3ab39d55623d383bbfaded928865456e91c" exitCode=0 Nov 28 10:34:52 crc kubenswrapper[4838]: I1128 10:34:52.407411 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wkvfh" event={"ID":"3adc8830-5957-482e-8f05-b921505ee8db","Type":"ContainerDied","Data":"ef9394d1e1d4b0362c0d6fbefaa5e3ab39d55623d383bbfaded928865456e91c"} Nov 28 10:34:52 crc kubenswrapper[4838]: I1128 10:34:52.407473 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wkvfh" event={"ID":"3adc8830-5957-482e-8f05-b921505ee8db","Type":"ContainerStarted","Data":"e7ff6c05343fc2f92ac7b2bed20f172a81fb332c2f345e1938a55eb568eda5d7"} Nov 28 10:34:53 crc kubenswrapper[4838]: I1128 10:34:53.421119 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wkvfh" event={"ID":"3adc8830-5957-482e-8f05-b921505ee8db","Type":"ContainerStarted","Data":"dcb042ca2e6cfafba53a2319df73bd0f0b7b74751049b001d5dc3c04e588b5a8"} Nov 28 10:34:53 crc kubenswrapper[4838]: E1128 10:34:53.710969 4838 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3adc8830_5957_482e_8f05_b921505ee8db.slice/crio-dcb042ca2e6cfafba53a2319df73bd0f0b7b74751049b001d5dc3c04e588b5a8.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3adc8830_5957_482e_8f05_b921505ee8db.slice/crio-conmon-dcb042ca2e6cfafba53a2319df73bd0f0b7b74751049b001d5dc3c04e588b5a8.scope\": RecentStats: unable to find data in memory cache]" Nov 28 10:34:54 crc kubenswrapper[4838]: I1128 10:34:54.432527 4838 generic.go:334] "Generic (PLEG): container finished" podID="3adc8830-5957-482e-8f05-b921505ee8db" containerID="dcb042ca2e6cfafba53a2319df73bd0f0b7b74751049b001d5dc3c04e588b5a8" exitCode=0 Nov 28 10:34:54 crc kubenswrapper[4838]: I1128 10:34:54.432606 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wkvfh" 
event={"ID":"3adc8830-5957-482e-8f05-b921505ee8db","Type":"ContainerDied","Data":"dcb042ca2e6cfafba53a2319df73bd0f0b7b74751049b001d5dc3c04e588b5a8"} Nov 28 10:34:55 crc kubenswrapper[4838]: I1128 10:34:55.447555 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wkvfh" event={"ID":"3adc8830-5957-482e-8f05-b921505ee8db","Type":"ContainerStarted","Data":"fb54351693018e84580beeeedba572f5d9cc885be62159dfd4a2bc021dd24fe1"} Nov 28 10:34:55 crc kubenswrapper[4838]: I1128 10:34:55.473101 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wkvfh" podStartSLOduration=1.82972002 podStartE2EDuration="4.473077245s" podCreationTimestamp="2025-11-28 10:34:51 +0000 UTC" firstStartedPulling="2025-11-28 10:34:52.410052834 +0000 UTC m=+2264.109027004" lastFinishedPulling="2025-11-28 10:34:55.053410019 +0000 UTC m=+2266.752384229" observedRunningTime="2025-11-28 10:34:55.469852688 +0000 UTC m=+2267.168826928" watchObservedRunningTime="2025-11-28 10:34:55.473077245 +0000 UTC m=+2267.172051465" Nov 28 10:35:01 crc kubenswrapper[4838]: I1128 10:35:01.596809 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wkvfh" Nov 28 10:35:01 crc kubenswrapper[4838]: I1128 10:35:01.599784 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wkvfh" Nov 28 10:35:01 crc kubenswrapper[4838]: I1128 10:35:01.675044 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wkvfh" Nov 28 10:35:02 crc kubenswrapper[4838]: I1128 10:35:02.607210 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wkvfh" Nov 28 10:35:02 crc kubenswrapper[4838]: I1128 10:35:02.654635 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wkvfh"] Nov 28 10:35:04 crc kubenswrapper[4838]: I1128 10:35:04.541041 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wkvfh" podUID="3adc8830-5957-482e-8f05-b921505ee8db" containerName="registry-server" containerID="cri-o://fb54351693018e84580beeeedba572f5d9cc885be62159dfd4a2bc021dd24fe1" gracePeriod=2 Nov 28 10:35:05 crc kubenswrapper[4838]: I1128 10:35:05.552515 4838 generic.go:334] "Generic (PLEG): container finished" podID="3adc8830-5957-482e-8f05-b921505ee8db" containerID="fb54351693018e84580beeeedba572f5d9cc885be62159dfd4a2bc021dd24fe1" exitCode=0 Nov 28 10:35:05 crc kubenswrapper[4838]: I1128 10:35:05.552637 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wkvfh" event={"ID":"3adc8830-5957-482e-8f05-b921505ee8db","Type":"ContainerDied","Data":"fb54351693018e84580beeeedba572f5d9cc885be62159dfd4a2bc021dd24fe1"} Nov 28 10:35:05 crc kubenswrapper[4838]: I1128 10:35:05.554015 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wkvfh" event={"ID":"3adc8830-5957-482e-8f05-b921505ee8db","Type":"ContainerDied","Data":"e7ff6c05343fc2f92ac7b2bed20f172a81fb332c2f345e1938a55eb568eda5d7"} Nov 28 10:35:05 crc kubenswrapper[4838]: I1128 10:35:05.554100 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e7ff6c05343fc2f92ac7b2bed20f172a81fb332c2f345e1938a55eb568eda5d7" Nov 28 10:35:05 crc 
kubenswrapper[4838]: I1128 10:35:05.631245 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wkvfh" Nov 28 10:35:05 crc kubenswrapper[4838]: I1128 10:35:05.702075 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8qcqg\" (UniqueName: \"kubernetes.io/projected/3adc8830-5957-482e-8f05-b921505ee8db-kube-api-access-8qcqg\") pod \"3adc8830-5957-482e-8f05-b921505ee8db\" (UID: \"3adc8830-5957-482e-8f05-b921505ee8db\") " Nov 28 10:35:05 crc kubenswrapper[4838]: I1128 10:35:05.702210 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3adc8830-5957-482e-8f05-b921505ee8db-catalog-content\") pod \"3adc8830-5957-482e-8f05-b921505ee8db\" (UID: \"3adc8830-5957-482e-8f05-b921505ee8db\") " Nov 28 10:35:05 crc kubenswrapper[4838]: I1128 10:35:05.702247 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3adc8830-5957-482e-8f05-b921505ee8db-utilities\") pod \"3adc8830-5957-482e-8f05-b921505ee8db\" (UID: \"3adc8830-5957-482e-8f05-b921505ee8db\") " Nov 28 10:35:05 crc kubenswrapper[4838]: I1128 10:35:05.708663 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3adc8830-5957-482e-8f05-b921505ee8db-utilities" (OuterVolumeSpecName: "utilities") pod "3adc8830-5957-482e-8f05-b921505ee8db" (UID: "3adc8830-5957-482e-8f05-b921505ee8db"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:35:05 crc kubenswrapper[4838]: I1128 10:35:05.718964 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3adc8830-5957-482e-8f05-b921505ee8db-kube-api-access-8qcqg" (OuterVolumeSpecName: "kube-api-access-8qcqg") pod "3adc8830-5957-482e-8f05-b921505ee8db" (UID: "3adc8830-5957-482e-8f05-b921505ee8db"). InnerVolumeSpecName "kube-api-access-8qcqg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:35:05 crc kubenswrapper[4838]: I1128 10:35:05.754731 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3adc8830-5957-482e-8f05-b921505ee8db-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3adc8830-5957-482e-8f05-b921505ee8db" (UID: "3adc8830-5957-482e-8f05-b921505ee8db"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:35:05 crc kubenswrapper[4838]: I1128 10:35:05.805081 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8qcqg\" (UniqueName: \"kubernetes.io/projected/3adc8830-5957-482e-8f05-b921505ee8db-kube-api-access-8qcqg\") on node \"crc\" DevicePath \"\"" Nov 28 10:35:05 crc kubenswrapper[4838]: I1128 10:35:05.805332 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3adc8830-5957-482e-8f05-b921505ee8db-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:35:05 crc kubenswrapper[4838]: I1128 10:35:05.805342 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3adc8830-5957-482e-8f05-b921505ee8db-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:35:06 crc kubenswrapper[4838]: I1128 10:35:06.564766 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wkvfh" Nov 28 10:35:06 crc kubenswrapper[4838]: I1128 10:35:06.639145 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wkvfh"] Nov 28 10:35:06 crc kubenswrapper[4838]: I1128 10:35:06.657298 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wkvfh"] Nov 28 10:35:08 crc kubenswrapper[4838]: I1128 10:35:08.594468 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3adc8830-5957-482e-8f05-b921505ee8db" path="/var/lib/kubelet/pods/3adc8830-5957-482e-8f05-b921505ee8db/volumes" Nov 28 10:35:18 crc kubenswrapper[4838]: I1128 10:35:18.079538 4838 scope.go:117] "RemoveContainer" containerID="0f8a47eb02a89f674bf42fb3ff1d041cb6a372e412565529ac9dd10cb4091e40" Nov 28 10:35:18 crc kubenswrapper[4838]: I1128 10:35:18.144493 4838 scope.go:117] "RemoveContainer" containerID="099284e5e0e738e61107a3db7a5198a5fbf09832d7385fbce4d250d5f911cd50" Nov 28 10:35:43 crc kubenswrapper[4838]: I1128 10:35:43.949989 4838 generic.go:334] "Generic (PLEG): container finished" podID="226a6e3a-8fcf-4284-b8a5-3f4055ae9838" containerID="d66acadd1f348ae6c41ed8c89abfb325dbe04f0a43e36c0dc6ec7e5a5583afd4" exitCode=0 Nov 28 10:35:43 crc kubenswrapper[4838]: I1128 10:35:43.950086 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" event={"ID":"226a6e3a-8fcf-4284-b8a5-3f4055ae9838","Type":"ContainerDied","Data":"d66acadd1f348ae6c41ed8c89abfb325dbe04f0a43e36c0dc6ec7e5a5583afd4"} Nov 28 10:35:45 crc kubenswrapper[4838]: I1128 10:35:45.456299 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" Nov 28 10:35:45 crc kubenswrapper[4838]: I1128 10:35:45.649371 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-ssh-key\") pod \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\" (UID: \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\") " Nov 28 10:35:45 crc kubenswrapper[4838]: I1128 10:35:45.649451 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-bootstrap-combined-ca-bundle\") pod \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\" (UID: \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\") " Nov 28 10:35:45 crc kubenswrapper[4838]: I1128 10:35:45.649569 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-inventory\") pod \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\" (UID: \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\") " Nov 28 10:35:45 crc kubenswrapper[4838]: I1128 10:35:45.650857 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z445n\" (UniqueName: \"kubernetes.io/projected/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-kube-api-access-z445n\") pod \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\" (UID: \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\") " Nov 28 10:35:45 crc kubenswrapper[4838]: I1128 10:35:45.650999 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-ceph\") pod \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\" (UID: 
\"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\") " Nov 28 10:35:45 crc kubenswrapper[4838]: I1128 10:35:45.658816 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-kube-api-access-z445n" (OuterVolumeSpecName: "kube-api-access-z445n") pod "226a6e3a-8fcf-4284-b8a5-3f4055ae9838" (UID: "226a6e3a-8fcf-4284-b8a5-3f4055ae9838"). InnerVolumeSpecName "kube-api-access-z445n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:35:45 crc kubenswrapper[4838]: I1128 10:35:45.658864 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "226a6e3a-8fcf-4284-b8a5-3f4055ae9838" (UID: "226a6e3a-8fcf-4284-b8a5-3f4055ae9838"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:35:45 crc kubenswrapper[4838]: I1128 10:35:45.662159 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-ceph" (OuterVolumeSpecName: "ceph") pod "226a6e3a-8fcf-4284-b8a5-3f4055ae9838" (UID: "226a6e3a-8fcf-4284-b8a5-3f4055ae9838"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:35:45 crc kubenswrapper[4838]: E1128 10:35:45.697148 4838 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-inventory podName:226a6e3a-8fcf-4284-b8a5-3f4055ae9838 nodeName:}" failed. No retries permitted until 2025-11-28 10:35:46.197112655 +0000 UTC m=+2317.896086855 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "inventory" (UniqueName: "kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-inventory") pod "226a6e3a-8fcf-4284-b8a5-3f4055ae9838" (UID: "226a6e3a-8fcf-4284-b8a5-3f4055ae9838") : error deleting /var/lib/kubelet/pods/226a6e3a-8fcf-4284-b8a5-3f4055ae9838/volume-subpaths: remove /var/lib/kubelet/pods/226a6e3a-8fcf-4284-b8a5-3f4055ae9838/volume-subpaths: no such file or directory Nov 28 10:35:45 crc kubenswrapper[4838]: I1128 10:35:45.700971 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "226a6e3a-8fcf-4284-b8a5-3f4055ae9838" (UID: "226a6e3a-8fcf-4284-b8a5-3f4055ae9838"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:35:45 crc kubenswrapper[4838]: I1128 10:35:45.753370 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z445n\" (UniqueName: \"kubernetes.io/projected/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-kube-api-access-z445n\") on node \"crc\" DevicePath \"\"" Nov 28 10:35:45 crc kubenswrapper[4838]: I1128 10:35:45.753417 4838 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 10:35:45 crc kubenswrapper[4838]: I1128 10:35:45.753436 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:35:45 crc kubenswrapper[4838]: I1128 10:35:45.753453 4838 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:35:45 crc kubenswrapper[4838]: I1128 10:35:45.975232 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" event={"ID":"226a6e3a-8fcf-4284-b8a5-3f4055ae9838","Type":"ContainerDied","Data":"553e27d3d3630a1ac6088f2e79b97d6cb83c54dbc47ba11f1b46460cd01a478a"} Nov 28 10:35:45 crc kubenswrapper[4838]: I1128 10:35:45.975289 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="553e27d3d3630a1ac6088f2e79b97d6cb83c54dbc47ba11f1b46460cd01a478a" Nov 28 10:35:45 crc kubenswrapper[4838]: I1128 10:35:45.975387 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.106252 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59"] Nov 28 10:35:46 crc kubenswrapper[4838]: E1128 10:35:46.108242 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3adc8830-5957-482e-8f05-b921505ee8db" containerName="registry-server" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.108361 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="3adc8830-5957-482e-8f05-b921505ee8db" containerName="registry-server" Nov 28 10:35:46 crc kubenswrapper[4838]: E1128 10:35:46.108459 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3adc8830-5957-482e-8f05-b921505ee8db" containerName="extract-utilities" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.108526 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="3adc8830-5957-482e-8f05-b921505ee8db" containerName="extract-utilities" Nov 28 10:35:46 crc kubenswrapper[4838]: E1128 10:35:46.108604 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="226a6e3a-8fcf-4284-b8a5-3f4055ae9838" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.108670 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="226a6e3a-8fcf-4284-b8a5-3f4055ae9838" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 28 10:35:46 crc kubenswrapper[4838]: E1128 10:35:46.108779 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3adc8830-5957-482e-8f05-b921505ee8db" containerName="extract-content" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.108847 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="3adc8830-5957-482e-8f05-b921505ee8db" containerName="extract-content" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.109104 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="226a6e3a-8fcf-4284-b8a5-3f4055ae9838" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.111271 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="3adc8830-5957-482e-8f05-b921505ee8db" containerName="registry-server" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.112146 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.133984 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59"] Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.160799 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-b8l59\" (UID: \"c2c8e67b-9151-44fa-b8f3-f86621d4fd67\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.160870 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6xbd\" (UniqueName: \"kubernetes.io/projected/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-kube-api-access-w6xbd\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-b8l59\" (UID: \"c2c8e67b-9151-44fa-b8f3-f86621d4fd67\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.160907 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-b8l59\" (UID: \"c2c8e67b-9151-44fa-b8f3-f86621d4fd67\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.161138 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-b8l59\" (UID: \"c2c8e67b-9151-44fa-b8f3-f86621d4fd67\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.262967 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-inventory\") pod \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\" (UID: \"226a6e3a-8fcf-4284-b8a5-3f4055ae9838\") " Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.263316 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-b8l59\" (UID: \"c2c8e67b-9151-44fa-b8f3-f86621d4fd67\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.263370 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6xbd\" (UniqueName: \"kubernetes.io/projected/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-kube-api-access-w6xbd\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-b8l59\" (UID: \"c2c8e67b-9151-44fa-b8f3-f86621d4fd67\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.263389 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-b8l59\" (UID: \"c2c8e67b-9151-44fa-b8f3-f86621d4fd67\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.263464 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-b8l59\" (UID: \"c2c8e67b-9151-44fa-b8f3-f86621d4fd67\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.267615 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-inventory" (OuterVolumeSpecName: "inventory") pod "226a6e3a-8fcf-4284-b8a5-3f4055ae9838" (UID: "226a6e3a-8fcf-4284-b8a5-3f4055ae9838"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.269048 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-b8l59\" (UID: \"c2c8e67b-9151-44fa-b8f3-f86621d4fd67\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.269075 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-b8l59\" (UID: \"c2c8e67b-9151-44fa-b8f3-f86621d4fd67\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.269376 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-b8l59\" (UID: \"c2c8e67b-9151-44fa-b8f3-f86621d4fd67\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.295605 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6xbd\" (UniqueName: \"kubernetes.io/projected/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-kube-api-access-w6xbd\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-b8l59\" (UID: \"c2c8e67b-9151-44fa-b8f3-f86621d4fd67\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.364555 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/226a6e3a-8fcf-4284-b8a5-3f4055ae9838-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.439823 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59" Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.829832 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59"] Nov 28 10:35:46 crc kubenswrapper[4838]: I1128 10:35:46.987796 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59" event={"ID":"c2c8e67b-9151-44fa-b8f3-f86621d4fd67","Type":"ContainerStarted","Data":"394ed5b63dd35cb373f76e19ae345bc354bcb2b4849d8fb4612b132e5bafc344"} Nov 28 10:35:48 crc kubenswrapper[4838]: I1128 10:35:47.999993 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59" event={"ID":"c2c8e67b-9151-44fa-b8f3-f86621d4fd67","Type":"ContainerStarted","Data":"950866e5533cfacb966254785c257cbc7329ed624a7f572b46a25ecd81ef3d62"} Nov 28 10:35:48 crc kubenswrapper[4838]: I1128 10:35:48.035536 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59" podStartSLOduration=1.464460363 podStartE2EDuration="2.035509114s" podCreationTimestamp="2025-11-28 10:35:46 +0000 UTC" firstStartedPulling="2025-11-28 10:35:46.847372936 +0000 UTC m=+2318.546347116" lastFinishedPulling="2025-11-28 10:35:47.418421667 +0000 UTC m=+2319.117395867" observedRunningTime="2025-11-28 10:35:48.021899615 +0000 UTC m=+2319.720873865" watchObservedRunningTime="2025-11-28 10:35:48.035509114 +0000 UTC m=+2319.734483324" Nov 28 10:35:48 crc kubenswrapper[4838]: I1128 10:35:48.300203 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-c877p"] Nov 28 10:35:48 crc kubenswrapper[4838]: I1128 10:35:48.307238 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-c877p" Nov 28 10:35:48 crc kubenswrapper[4838]: I1128 10:35:48.343256 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c877p"] Nov 28 10:35:48 crc kubenswrapper[4838]: I1128 10:35:48.510510 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ba67be2-320d-41f5-96bf-45f759352892-catalog-content\") pod \"certified-operators-c877p\" (UID: \"6ba67be2-320d-41f5-96bf-45f759352892\") " pod="openshift-marketplace/certified-operators-c877p" Nov 28 10:35:48 crc kubenswrapper[4838]: I1128 10:35:48.511016 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ba67be2-320d-41f5-96bf-45f759352892-utilities\") pod \"certified-operators-c877p\" (UID: \"6ba67be2-320d-41f5-96bf-45f759352892\") " pod="openshift-marketplace/certified-operators-c877p" Nov 28 10:35:48 crc kubenswrapper[4838]: I1128 10:35:48.511092 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxnw2\" (UniqueName: \"kubernetes.io/projected/6ba67be2-320d-41f5-96bf-45f759352892-kube-api-access-qxnw2\") pod \"certified-operators-c877p\" (UID: \"6ba67be2-320d-41f5-96bf-45f759352892\") " pod="openshift-marketplace/certified-operators-c877p" Nov 28 10:35:48 crc kubenswrapper[4838]: I1128 10:35:48.634601 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ba67be2-320d-41f5-96bf-45f759352892-utilities\") pod \"certified-operators-c877p\" (UID: \"6ba67be2-320d-41f5-96bf-45f759352892\") " pod="openshift-marketplace/certified-operators-c877p" Nov 28 10:35:48 crc kubenswrapper[4838]: I1128 10:35:48.634660 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxnw2\" (UniqueName: \"kubernetes.io/projected/6ba67be2-320d-41f5-96bf-45f759352892-kube-api-access-qxnw2\") pod \"certified-operators-c877p\" (UID: \"6ba67be2-320d-41f5-96bf-45f759352892\") " pod="openshift-marketplace/certified-operators-c877p" Nov 28 10:35:48 crc kubenswrapper[4838]: I1128 10:35:48.634809 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ba67be2-320d-41f5-96bf-45f759352892-catalog-content\") pod \"certified-operators-c877p\" (UID: \"6ba67be2-320d-41f5-96bf-45f759352892\") " pod="openshift-marketplace/certified-operators-c877p" Nov 28 10:35:48 crc kubenswrapper[4838]: I1128 10:35:48.636682 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ba67be2-320d-41f5-96bf-45f759352892-utilities\") pod \"certified-operators-c877p\" (UID: \"6ba67be2-320d-41f5-96bf-45f759352892\") " pod="openshift-marketplace/certified-operators-c877p" Nov 28 10:35:48 crc kubenswrapper[4838]: I1128 10:35:48.637286 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ba67be2-320d-41f5-96bf-45f759352892-catalog-content\") pod \"certified-operators-c877p\" (UID: \"6ba67be2-320d-41f5-96bf-45f759352892\") " pod="openshift-marketplace/certified-operators-c877p" Nov 28 10:35:48 crc kubenswrapper[4838]: I1128 10:35:48.663533 4838 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-qxnw2\" (UniqueName: \"kubernetes.io/projected/6ba67be2-320d-41f5-96bf-45f759352892-kube-api-access-qxnw2\") pod \"certified-operators-c877p\" (UID: \"6ba67be2-320d-41f5-96bf-45f759352892\") " pod="openshift-marketplace/certified-operators-c877p" Nov 28 10:35:48 crc kubenswrapper[4838]: I1128 10:35:48.934060 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c877p" Nov 28 10:35:49 crc kubenswrapper[4838]: I1128 10:35:49.437591 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c877p"] Nov 28 10:35:50 crc kubenswrapper[4838]: I1128 10:35:50.029353 4838 generic.go:334] "Generic (PLEG): container finished" podID="6ba67be2-320d-41f5-96bf-45f759352892" containerID="c2a51b130555bbe1e77c50aac22f634d1d000c68f2ea4f2c848e54fb7d7f3d0a" exitCode=0 Nov 28 10:35:50 crc kubenswrapper[4838]: I1128 10:35:50.029424 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c877p" event={"ID":"6ba67be2-320d-41f5-96bf-45f759352892","Type":"ContainerDied","Data":"c2a51b130555bbe1e77c50aac22f634d1d000c68f2ea4f2c848e54fb7d7f3d0a"} Nov 28 10:35:50 crc kubenswrapper[4838]: I1128 10:35:50.029474 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c877p" event={"ID":"6ba67be2-320d-41f5-96bf-45f759352892","Type":"ContainerStarted","Data":"99b8ad76f4c3bf8da4a4c7c1fd6abe066f0389bcd19b2d7ba735edd3d6fd0a06"} Nov 28 10:35:52 crc kubenswrapper[4838]: I1128 10:35:52.066506 4838 generic.go:334] "Generic (PLEG): container finished" podID="6ba67be2-320d-41f5-96bf-45f759352892" containerID="08e86f18f919b6f271083d2cb7e049f312c219c9ce4a88428f4d771637824a77" exitCode=0 Nov 28 10:35:52 crc kubenswrapper[4838]: I1128 10:35:52.066592 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c877p" event={"ID":"6ba67be2-320d-41f5-96bf-45f759352892","Type":"ContainerDied","Data":"08e86f18f919b6f271083d2cb7e049f312c219c9ce4a88428f4d771637824a77"} Nov 28 10:35:53 crc kubenswrapper[4838]: I1128 10:35:53.082170 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c877p" event={"ID":"6ba67be2-320d-41f5-96bf-45f759352892","Type":"ContainerStarted","Data":"d9db3927850561a5b3a0e875051057bd781567b3662e3f7de7c4ae2b9f5d6ce1"} Nov 28 10:35:53 crc kubenswrapper[4838]: I1128 10:35:53.106985 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-c877p" podStartSLOduration=2.462746972 podStartE2EDuration="5.106964961s" podCreationTimestamp="2025-11-28 10:35:48 +0000 UTC" firstStartedPulling="2025-11-28 10:35:50.031703657 +0000 UTC m=+2321.730677857" lastFinishedPulling="2025-11-28 10:35:52.675921676 +0000 UTC m=+2324.374895846" observedRunningTime="2025-11-28 10:35:53.102089809 +0000 UTC m=+2324.801064009" watchObservedRunningTime="2025-11-28 10:35:53.106964961 +0000 UTC m=+2324.805939141" Nov 28 10:35:58 crc kubenswrapper[4838]: I1128 10:35:58.935528 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-c877p" Nov 28 10:35:58 crc kubenswrapper[4838]: I1128 10:35:58.935947 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-c877p" Nov 28 10:35:59 crc kubenswrapper[4838]: I1128 10:35:59.023562 4838 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-c877p" Nov 28 10:35:59 crc kubenswrapper[4838]: I1128 10:35:59.254013 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-c877p" Nov 28 10:35:59 crc kubenswrapper[4838]: I1128 10:35:59.324563 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c877p"] Nov 28 10:36:01 crc kubenswrapper[4838]: I1128 10:36:01.190368 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-c877p" podUID="6ba67be2-320d-41f5-96bf-45f759352892" containerName="registry-server" containerID="cri-o://d9db3927850561a5b3a0e875051057bd781567b3662e3f7de7c4ae2b9f5d6ce1" gracePeriod=2 Nov 28 10:36:01 crc kubenswrapper[4838]: I1128 10:36:01.692895 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c877p" Nov 28 10:36:01 crc kubenswrapper[4838]: I1128 10:36:01.808456 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ba67be2-320d-41f5-96bf-45f759352892-catalog-content\") pod \"6ba67be2-320d-41f5-96bf-45f759352892\" (UID: \"6ba67be2-320d-41f5-96bf-45f759352892\") " Nov 28 10:36:01 crc kubenswrapper[4838]: I1128 10:36:01.808526 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qxnw2\" (UniqueName: \"kubernetes.io/projected/6ba67be2-320d-41f5-96bf-45f759352892-kube-api-access-qxnw2\") pod \"6ba67be2-320d-41f5-96bf-45f759352892\" (UID: \"6ba67be2-320d-41f5-96bf-45f759352892\") " Nov 28 10:36:01 crc kubenswrapper[4838]: I1128 10:36:01.808643 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ba67be2-320d-41f5-96bf-45f759352892-utilities\") pod \"6ba67be2-320d-41f5-96bf-45f759352892\" (UID: \"6ba67be2-320d-41f5-96bf-45f759352892\") " Nov 28 10:36:01 crc kubenswrapper[4838]: I1128 10:36:01.809993 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ba67be2-320d-41f5-96bf-45f759352892-utilities" (OuterVolumeSpecName: "utilities") pod "6ba67be2-320d-41f5-96bf-45f759352892" (UID: "6ba67be2-320d-41f5-96bf-45f759352892"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:36:01 crc kubenswrapper[4838]: I1128 10:36:01.816052 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ba67be2-320d-41f5-96bf-45f759352892-kube-api-access-qxnw2" (OuterVolumeSpecName: "kube-api-access-qxnw2") pod "6ba67be2-320d-41f5-96bf-45f759352892" (UID: "6ba67be2-320d-41f5-96bf-45f759352892"). InnerVolumeSpecName "kube-api-access-qxnw2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:36:01 crc kubenswrapper[4838]: I1128 10:36:01.911171 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qxnw2\" (UniqueName: \"kubernetes.io/projected/6ba67be2-320d-41f5-96bf-45f759352892-kube-api-access-qxnw2\") on node \"crc\" DevicePath \"\"" Nov 28 10:36:01 crc kubenswrapper[4838]: I1128 10:36:01.911218 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ba67be2-320d-41f5-96bf-45f759352892-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:36:01 crc kubenswrapper[4838]: I1128 10:36:01.929196 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ba67be2-320d-41f5-96bf-45f759352892-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6ba67be2-320d-41f5-96bf-45f759352892" (UID: "6ba67be2-320d-41f5-96bf-45f759352892"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:36:02 crc kubenswrapper[4838]: I1128 10:36:02.014589 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ba67be2-320d-41f5-96bf-45f759352892-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:36:02 crc kubenswrapper[4838]: I1128 10:36:02.202709 4838 generic.go:334] "Generic (PLEG): container finished" podID="6ba67be2-320d-41f5-96bf-45f759352892" containerID="d9db3927850561a5b3a0e875051057bd781567b3662e3f7de7c4ae2b9f5d6ce1" exitCode=0 Nov 28 10:36:02 crc kubenswrapper[4838]: I1128 10:36:02.202803 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c877p" event={"ID":"6ba67be2-320d-41f5-96bf-45f759352892","Type":"ContainerDied","Data":"d9db3927850561a5b3a0e875051057bd781567b3662e3f7de7c4ae2b9f5d6ce1"} Nov 28 10:36:02 crc kubenswrapper[4838]: I1128 10:36:02.202855 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c877p" event={"ID":"6ba67be2-320d-41f5-96bf-45f759352892","Type":"ContainerDied","Data":"99b8ad76f4c3bf8da4a4c7c1fd6abe066f0389bcd19b2d7ba735edd3d6fd0a06"} Nov 28 10:36:02 crc kubenswrapper[4838]: I1128 10:36:02.202888 4838 scope.go:117] "RemoveContainer" containerID="d9db3927850561a5b3a0e875051057bd781567b3662e3f7de7c4ae2b9f5d6ce1" Nov 28 10:36:02 crc kubenswrapper[4838]: I1128 10:36:02.202907 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-c877p" Nov 28 10:36:02 crc kubenswrapper[4838]: I1128 10:36:02.230638 4838 scope.go:117] "RemoveContainer" containerID="08e86f18f919b6f271083d2cb7e049f312c219c9ce4a88428f4d771637824a77" Nov 28 10:36:02 crc kubenswrapper[4838]: I1128 10:36:02.260624 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c877p"] Nov 28 10:36:02 crc kubenswrapper[4838]: I1128 10:36:02.268060 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-c877p"] Nov 28 10:36:02 crc kubenswrapper[4838]: I1128 10:36:02.272981 4838 scope.go:117] "RemoveContainer" containerID="c2a51b130555bbe1e77c50aac22f634d1d000c68f2ea4f2c848e54fb7d7f3d0a" Nov 28 10:36:02 crc kubenswrapper[4838]: I1128 10:36:02.315849 4838 scope.go:117] "RemoveContainer" containerID="d9db3927850561a5b3a0e875051057bd781567b3662e3f7de7c4ae2b9f5d6ce1" Nov 28 10:36:02 crc kubenswrapper[4838]: E1128 10:36:02.317112 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9db3927850561a5b3a0e875051057bd781567b3662e3f7de7c4ae2b9f5d6ce1\": container with ID starting with d9db3927850561a5b3a0e875051057bd781567b3662e3f7de7c4ae2b9f5d6ce1 not found: ID does not exist" containerID="d9db3927850561a5b3a0e875051057bd781567b3662e3f7de7c4ae2b9f5d6ce1" Nov 28 10:36:02 crc kubenswrapper[4838]: I1128 10:36:02.317147 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9db3927850561a5b3a0e875051057bd781567b3662e3f7de7c4ae2b9f5d6ce1"} err="failed to get container status \"d9db3927850561a5b3a0e875051057bd781567b3662e3f7de7c4ae2b9f5d6ce1\": rpc error: code = NotFound desc = could not find container \"d9db3927850561a5b3a0e875051057bd781567b3662e3f7de7c4ae2b9f5d6ce1\": container with ID starting with d9db3927850561a5b3a0e875051057bd781567b3662e3f7de7c4ae2b9f5d6ce1 not found: ID does not exist" Nov 28 10:36:02 crc kubenswrapper[4838]: I1128 10:36:02.317168 4838 scope.go:117] "RemoveContainer" containerID="08e86f18f919b6f271083d2cb7e049f312c219c9ce4a88428f4d771637824a77" Nov 28 10:36:02 crc kubenswrapper[4838]: E1128 10:36:02.317461 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08e86f18f919b6f271083d2cb7e049f312c219c9ce4a88428f4d771637824a77\": container with ID starting with 08e86f18f919b6f271083d2cb7e049f312c219c9ce4a88428f4d771637824a77 not found: ID does not exist" containerID="08e86f18f919b6f271083d2cb7e049f312c219c9ce4a88428f4d771637824a77" Nov 28 10:36:02 crc kubenswrapper[4838]: I1128 10:36:02.317491 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08e86f18f919b6f271083d2cb7e049f312c219c9ce4a88428f4d771637824a77"} err="failed to get container status \"08e86f18f919b6f271083d2cb7e049f312c219c9ce4a88428f4d771637824a77\": rpc error: code = NotFound desc = could not find container \"08e86f18f919b6f271083d2cb7e049f312c219c9ce4a88428f4d771637824a77\": container with ID starting with 08e86f18f919b6f271083d2cb7e049f312c219c9ce4a88428f4d771637824a77 not found: ID does not exist" Nov 28 10:36:02 crc kubenswrapper[4838]: I1128 10:36:02.317509 4838 scope.go:117] "RemoveContainer" containerID="c2a51b130555bbe1e77c50aac22f634d1d000c68f2ea4f2c848e54fb7d7f3d0a" Nov 28 10:36:02 crc kubenswrapper[4838]: E1128 10:36:02.317951 4838 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"c2a51b130555bbe1e77c50aac22f634d1d000c68f2ea4f2c848e54fb7d7f3d0a\": container with ID starting with c2a51b130555bbe1e77c50aac22f634d1d000c68f2ea4f2c848e54fb7d7f3d0a not found: ID does not exist" containerID="c2a51b130555bbe1e77c50aac22f634d1d000c68f2ea4f2c848e54fb7d7f3d0a" Nov 28 10:36:02 crc kubenswrapper[4838]: I1128 10:36:02.317972 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2a51b130555bbe1e77c50aac22f634d1d000c68f2ea4f2c848e54fb7d7f3d0a"} err="failed to get container status \"c2a51b130555bbe1e77c50aac22f634d1d000c68f2ea4f2c848e54fb7d7f3d0a\": rpc error: code = NotFound desc = could not find container \"c2a51b130555bbe1e77c50aac22f634d1d000c68f2ea4f2c848e54fb7d7f3d0a\": container with ID starting with c2a51b130555bbe1e77c50aac22f634d1d000c68f2ea4f2c848e54fb7d7f3d0a not found: ID does not exist" Nov 28 10:36:02 crc kubenswrapper[4838]: I1128 10:36:02.578785 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ba67be2-320d-41f5-96bf-45f759352892" path="/var/lib/kubelet/pods/6ba67be2-320d-41f5-96bf-45f759352892/volumes" Nov 28 10:36:16 crc kubenswrapper[4838]: I1128 10:36:16.360654 4838 generic.go:334] "Generic (PLEG): container finished" podID="c2c8e67b-9151-44fa-b8f3-f86621d4fd67" containerID="950866e5533cfacb966254785c257cbc7329ed624a7f572b46a25ecd81ef3d62" exitCode=0 Nov 28 10:36:16 crc kubenswrapper[4838]: I1128 10:36:16.360755 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59" event={"ID":"c2c8e67b-9151-44fa-b8f3-f86621d4fd67","Type":"ContainerDied","Data":"950866e5533cfacb966254785c257cbc7329ed624a7f572b46a25ecd81ef3d62"} Nov 28 10:36:17 crc kubenswrapper[4838]: I1128 10:36:17.869338 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.030547 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-ceph\") pod \"c2c8e67b-9151-44fa-b8f3-f86621d4fd67\" (UID: \"c2c8e67b-9151-44fa-b8f3-f86621d4fd67\") " Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.030660 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-inventory\") pod \"c2c8e67b-9151-44fa-b8f3-f86621d4fd67\" (UID: \"c2c8e67b-9151-44fa-b8f3-f86621d4fd67\") " Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.030885 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-ssh-key\") pod \"c2c8e67b-9151-44fa-b8f3-f86621d4fd67\" (UID: \"c2c8e67b-9151-44fa-b8f3-f86621d4fd67\") " Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.030935 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6xbd\" (UniqueName: \"kubernetes.io/projected/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-kube-api-access-w6xbd\") pod \"c2c8e67b-9151-44fa-b8f3-f86621d4fd67\" (UID: \"c2c8e67b-9151-44fa-b8f3-f86621d4fd67\") " Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.039912 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-ceph" (OuterVolumeSpecName: "ceph") pod "c2c8e67b-9151-44fa-b8f3-f86621d4fd67" (UID: "c2c8e67b-9151-44fa-b8f3-f86621d4fd67"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.044555 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-kube-api-access-w6xbd" (OuterVolumeSpecName: "kube-api-access-w6xbd") pod "c2c8e67b-9151-44fa-b8f3-f86621d4fd67" (UID: "c2c8e67b-9151-44fa-b8f3-f86621d4fd67"). InnerVolumeSpecName "kube-api-access-w6xbd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.078697 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c2c8e67b-9151-44fa-b8f3-f86621d4fd67" (UID: "c2c8e67b-9151-44fa-b8f3-f86621d4fd67"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.079528 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-inventory" (OuterVolumeSpecName: "inventory") pod "c2c8e67b-9151-44fa-b8f3-f86621d4fd67" (UID: "c2c8e67b-9151-44fa-b8f3-f86621d4fd67"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.133912 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.134284 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6xbd\" (UniqueName: \"kubernetes.io/projected/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-kube-api-access-w6xbd\") on node \"crc\" DevicePath \"\"" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.134304 4838 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.134325 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2c8e67b-9151-44fa-b8f3-f86621d4fd67-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.383428 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59" event={"ID":"c2c8e67b-9151-44fa-b8f3-f86621d4fd67","Type":"ContainerDied","Data":"394ed5b63dd35cb373f76e19ae345bc354bcb2b4849d8fb4612b132e5bafc344"} Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.383467 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="394ed5b63dd35cb373f76e19ae345bc354bcb2b4849d8fb4612b132e5bafc344" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.383510 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-b8l59" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.519263 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc"] Nov 28 10:36:18 crc kubenswrapper[4838]: E1128 10:36:18.519781 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ba67be2-320d-41f5-96bf-45f759352892" containerName="registry-server" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.519804 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ba67be2-320d-41f5-96bf-45f759352892" containerName="registry-server" Nov 28 10:36:18 crc kubenswrapper[4838]: E1128 10:36:18.519816 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ba67be2-320d-41f5-96bf-45f759352892" containerName="extract-content" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.519826 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ba67be2-320d-41f5-96bf-45f759352892" containerName="extract-content" Nov 28 10:36:18 crc kubenswrapper[4838]: E1128 10:36:18.519840 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ba67be2-320d-41f5-96bf-45f759352892" containerName="extract-utilities" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.519850 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ba67be2-320d-41f5-96bf-45f759352892" containerName="extract-utilities" Nov 28 10:36:18 crc kubenswrapper[4838]: E1128 10:36:18.519877 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2c8e67b-9151-44fa-b8f3-f86621d4fd67" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 
10:36:18.519885 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2c8e67b-9151-44fa-b8f3-f86621d4fd67" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.520154 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ba67be2-320d-41f5-96bf-45f759352892" containerName="registry-server" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.520175 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2c8e67b-9151-44fa-b8f3-f86621d4fd67" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.521033 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.531255 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc"] Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.538049 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.538234 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.538324 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.538438 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.539017 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.645634 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d1fe48d6-01d5-4805-8359-921c9b8888a4-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc\" (UID: \"d1fe48d6-01d5-4805-8359-921c9b8888a4\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.646173 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d1fe48d6-01d5-4805-8359-921c9b8888a4-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc\" (UID: \"d1fe48d6-01d5-4805-8359-921c9b8888a4\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.646216 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d1fe48d6-01d5-4805-8359-921c9b8888a4-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc\" (UID: \"d1fe48d6-01d5-4805-8359-921c9b8888a4\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.646245 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2q4f\" (UniqueName: \"kubernetes.io/projected/d1fe48d6-01d5-4805-8359-921c9b8888a4-kube-api-access-b2q4f\") 
pod \"validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc\" (UID: \"d1fe48d6-01d5-4805-8359-921c9b8888a4\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.748297 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d1fe48d6-01d5-4805-8359-921c9b8888a4-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc\" (UID: \"d1fe48d6-01d5-4805-8359-921c9b8888a4\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.748394 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d1fe48d6-01d5-4805-8359-921c9b8888a4-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc\" (UID: \"d1fe48d6-01d5-4805-8359-921c9b8888a4\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.748449 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2q4f\" (UniqueName: \"kubernetes.io/projected/d1fe48d6-01d5-4805-8359-921c9b8888a4-kube-api-access-b2q4f\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc\" (UID: \"d1fe48d6-01d5-4805-8359-921c9b8888a4\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.748621 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d1fe48d6-01d5-4805-8359-921c9b8888a4-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc\" (UID: \"d1fe48d6-01d5-4805-8359-921c9b8888a4\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.756506 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d1fe48d6-01d5-4805-8359-921c9b8888a4-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc\" (UID: \"d1fe48d6-01d5-4805-8359-921c9b8888a4\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.758814 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d1fe48d6-01d5-4805-8359-921c9b8888a4-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc\" (UID: \"d1fe48d6-01d5-4805-8359-921c9b8888a4\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.761868 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d1fe48d6-01d5-4805-8359-921c9b8888a4-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc\" (UID: \"d1fe48d6-01d5-4805-8359-921c9b8888a4\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.778384 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2q4f\" (UniqueName: \"kubernetes.io/projected/d1fe48d6-01d5-4805-8359-921c9b8888a4-kube-api-access-b2q4f\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc\" (UID: 
\"d1fe48d6-01d5-4805-8359-921c9b8888a4\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc" Nov 28 10:36:18 crc kubenswrapper[4838]: I1128 10:36:18.851479 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc" Nov 28 10:36:19 crc kubenswrapper[4838]: I1128 10:36:19.275786 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc"] Nov 28 10:36:19 crc kubenswrapper[4838]: I1128 10:36:19.297634 4838 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 10:36:19 crc kubenswrapper[4838]: I1128 10:36:19.398971 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc" event={"ID":"d1fe48d6-01d5-4805-8359-921c9b8888a4","Type":"ContainerStarted","Data":"071965ed1a85e751782afe49e48fb7430d4f68f496f2d93b249cb2d90b6b06f8"} Nov 28 10:36:20 crc kubenswrapper[4838]: I1128 10:36:20.410344 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc" event={"ID":"d1fe48d6-01d5-4805-8359-921c9b8888a4","Type":"ContainerStarted","Data":"f372b1ea71e1f024f5a063643249a1b1fa9ac6ff090805a6049024361776568f"} Nov 28 10:36:20 crc kubenswrapper[4838]: I1128 10:36:20.447286 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc" podStartSLOduration=1.96396898 podStartE2EDuration="2.447260481s" podCreationTimestamp="2025-11-28 10:36:18 +0000 UTC" firstStartedPulling="2025-11-28 10:36:19.297144663 +0000 UTC m=+2350.996118873" lastFinishedPulling="2025-11-28 10:36:19.780436174 +0000 UTC m=+2351.479410374" observedRunningTime="2025-11-28 10:36:20.435041949 +0000 UTC m=+2352.134016149" watchObservedRunningTime="2025-11-28 10:36:20.447260481 +0000 UTC m=+2352.146234691" Nov 28 10:36:26 crc kubenswrapper[4838]: I1128 10:36:26.472030 4838 generic.go:334] "Generic (PLEG): container finished" podID="d1fe48d6-01d5-4805-8359-921c9b8888a4" containerID="f372b1ea71e1f024f5a063643249a1b1fa9ac6ff090805a6049024361776568f" exitCode=0 Nov 28 10:36:26 crc kubenswrapper[4838]: I1128 10:36:26.472124 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc" event={"ID":"d1fe48d6-01d5-4805-8359-921c9b8888a4","Type":"ContainerDied","Data":"f372b1ea71e1f024f5a063643249a1b1fa9ac6ff090805a6049024361776568f"} Nov 28 10:36:27 crc kubenswrapper[4838]: I1128 10:36:27.953361 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc" Nov 28 10:36:27 crc kubenswrapper[4838]: I1128 10:36:27.977360 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d1fe48d6-01d5-4805-8359-921c9b8888a4-ceph\") pod \"d1fe48d6-01d5-4805-8359-921c9b8888a4\" (UID: \"d1fe48d6-01d5-4805-8359-921c9b8888a4\") " Nov 28 10:36:27 crc kubenswrapper[4838]: I1128 10:36:27.977536 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d1fe48d6-01d5-4805-8359-921c9b8888a4-inventory\") pod \"d1fe48d6-01d5-4805-8359-921c9b8888a4\" (UID: \"d1fe48d6-01d5-4805-8359-921c9b8888a4\") " Nov 28 10:36:27 crc kubenswrapper[4838]: I1128 10:36:27.977590 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b2q4f\" (UniqueName: \"kubernetes.io/projected/d1fe48d6-01d5-4805-8359-921c9b8888a4-kube-api-access-b2q4f\") pod \"d1fe48d6-01d5-4805-8359-921c9b8888a4\" (UID: \"d1fe48d6-01d5-4805-8359-921c9b8888a4\") " Nov 28 10:36:27 crc kubenswrapper[4838]: I1128 10:36:27.977629 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d1fe48d6-01d5-4805-8359-921c9b8888a4-ssh-key\") pod \"d1fe48d6-01d5-4805-8359-921c9b8888a4\" (UID: \"d1fe48d6-01d5-4805-8359-921c9b8888a4\") " Nov 28 10:36:27 crc kubenswrapper[4838]: I1128 10:36:27.983554 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1fe48d6-01d5-4805-8359-921c9b8888a4-ceph" (OuterVolumeSpecName: "ceph") pod "d1fe48d6-01d5-4805-8359-921c9b8888a4" (UID: "d1fe48d6-01d5-4805-8359-921c9b8888a4"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:36:27 crc kubenswrapper[4838]: I1128 10:36:27.984923 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1fe48d6-01d5-4805-8359-921c9b8888a4-kube-api-access-b2q4f" (OuterVolumeSpecName: "kube-api-access-b2q4f") pod "d1fe48d6-01d5-4805-8359-921c9b8888a4" (UID: "d1fe48d6-01d5-4805-8359-921c9b8888a4"). InnerVolumeSpecName "kube-api-access-b2q4f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.017016 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1fe48d6-01d5-4805-8359-921c9b8888a4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d1fe48d6-01d5-4805-8359-921c9b8888a4" (UID: "d1fe48d6-01d5-4805-8359-921c9b8888a4"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.028408 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1fe48d6-01d5-4805-8359-921c9b8888a4-inventory" (OuterVolumeSpecName: "inventory") pod "d1fe48d6-01d5-4805-8359-921c9b8888a4" (UID: "d1fe48d6-01d5-4805-8359-921c9b8888a4"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.082298 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d1fe48d6-01d5-4805-8359-921c9b8888a4-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.082347 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b2q4f\" (UniqueName: \"kubernetes.io/projected/d1fe48d6-01d5-4805-8359-921c9b8888a4-kube-api-access-b2q4f\") on node \"crc\" DevicePath \"\"" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.082368 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d1fe48d6-01d5-4805-8359-921c9b8888a4-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.082387 4838 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d1fe48d6-01d5-4805-8359-921c9b8888a4-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.494062 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc" event={"ID":"d1fe48d6-01d5-4805-8359-921c9b8888a4","Type":"ContainerDied","Data":"071965ed1a85e751782afe49e48fb7430d4f68f496f2d93b249cb2d90b6b06f8"} Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.494102 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="071965ed1a85e751782afe49e48fb7430d4f68f496f2d93b249cb2d90b6b06f8" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.494129 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.589359 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn"] Nov 28 10:36:28 crc kubenswrapper[4838]: E1128 10:36:28.590222 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1fe48d6-01d5-4805-8359-921c9b8888a4" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.590260 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1fe48d6-01d5-4805-8359-921c9b8888a4" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.590661 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1fe48d6-01d5-4805-8359-921c9b8888a4" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.591638 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn"] Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.591803 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.594369 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.594480 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.594571 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.594861 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.594998 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.790502 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/02540331-1ba6-45ee-824c-52e9b076f511-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-ht9vn\" (UID: \"02540331-1ba6-45ee-824c-52e9b076f511\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.791807 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/02540331-1ba6-45ee-824c-52e9b076f511-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-ht9vn\" (UID: \"02540331-1ba6-45ee-824c-52e9b076f511\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.791954 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzpcj\" (UniqueName: \"kubernetes.io/projected/02540331-1ba6-45ee-824c-52e9b076f511-kube-api-access-rzpcj\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-ht9vn\" (UID: \"02540331-1ba6-45ee-824c-52e9b076f511\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.792027 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/02540331-1ba6-45ee-824c-52e9b076f511-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-ht9vn\" (UID: \"02540331-1ba6-45ee-824c-52e9b076f511\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.894572 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/02540331-1ba6-45ee-824c-52e9b076f511-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-ht9vn\" (UID: \"02540331-1ba6-45ee-824c-52e9b076f511\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.894691 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzpcj\" (UniqueName: \"kubernetes.io/projected/02540331-1ba6-45ee-824c-52e9b076f511-kube-api-access-rzpcj\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-ht9vn\" (UID: 
\"02540331-1ba6-45ee-824c-52e9b076f511\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.894742 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/02540331-1ba6-45ee-824c-52e9b076f511-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-ht9vn\" (UID: \"02540331-1ba6-45ee-824c-52e9b076f511\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.894862 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/02540331-1ba6-45ee-824c-52e9b076f511-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-ht9vn\" (UID: \"02540331-1ba6-45ee-824c-52e9b076f511\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.900464 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/02540331-1ba6-45ee-824c-52e9b076f511-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-ht9vn\" (UID: \"02540331-1ba6-45ee-824c-52e9b076f511\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.906101 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/02540331-1ba6-45ee-824c-52e9b076f511-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-ht9vn\" (UID: \"02540331-1ba6-45ee-824c-52e9b076f511\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.908726 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/02540331-1ba6-45ee-824c-52e9b076f511-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-ht9vn\" (UID: \"02540331-1ba6-45ee-824c-52e9b076f511\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.923481 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzpcj\" (UniqueName: \"kubernetes.io/projected/02540331-1ba6-45ee-824c-52e9b076f511-kube-api-access-rzpcj\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-ht9vn\" (UID: \"02540331-1ba6-45ee-824c-52e9b076f511\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn" Nov 28 10:36:28 crc kubenswrapper[4838]: I1128 10:36:28.961422 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn" Nov 28 10:36:29 crc kubenswrapper[4838]: I1128 10:36:29.328406 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn"] Nov 28 10:36:29 crc kubenswrapper[4838]: I1128 10:36:29.508228 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn" event={"ID":"02540331-1ba6-45ee-824c-52e9b076f511","Type":"ContainerStarted","Data":"cc5111c3853f8fad6d7430f9f2c6f36839cd5f8831dab1fa291226edee83adba"} Nov 28 10:36:30 crc kubenswrapper[4838]: I1128 10:36:30.517957 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn" event={"ID":"02540331-1ba6-45ee-824c-52e9b076f511","Type":"ContainerStarted","Data":"0e6380481cb1bc24ca08416cb1d4f6416b7bf5c9d403d3c6ec4acfc09c9d151a"} Nov 28 10:36:30 crc kubenswrapper[4838]: I1128 10:36:30.543749 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn" podStartSLOduration=2.095036141 podStartE2EDuration="2.543735105s" podCreationTimestamp="2025-11-28 10:36:28 +0000 UTC" firstStartedPulling="2025-11-28 10:36:29.335707828 +0000 UTC m=+2361.034682038" lastFinishedPulling="2025-11-28 10:36:29.784406792 +0000 UTC m=+2361.483381002" observedRunningTime="2025-11-28 10:36:30.539979104 +0000 UTC m=+2362.238953294" watchObservedRunningTime="2025-11-28 10:36:30.543735105 +0000 UTC m=+2362.242709275" Nov 28 10:36:53 crc kubenswrapper[4838]: I1128 10:36:53.940087 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:36:53 crc kubenswrapper[4838]: I1128 10:36:53.940824 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:37:13 crc kubenswrapper[4838]: I1128 10:37:13.026193 4838 generic.go:334] "Generic (PLEG): container finished" podID="02540331-1ba6-45ee-824c-52e9b076f511" containerID="0e6380481cb1bc24ca08416cb1d4f6416b7bf5c9d403d3c6ec4acfc09c9d151a" exitCode=0 Nov 28 10:37:13 crc kubenswrapper[4838]: I1128 10:37:13.026335 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn" event={"ID":"02540331-1ba6-45ee-824c-52e9b076f511","Type":"ContainerDied","Data":"0e6380481cb1bc24ca08416cb1d4f6416b7bf5c9d403d3c6ec4acfc09c9d151a"} Nov 28 10:37:14 crc kubenswrapper[4838]: I1128 10:37:14.470251 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn" Nov 28 10:37:14 crc kubenswrapper[4838]: I1128 10:37:14.593499 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzpcj\" (UniqueName: \"kubernetes.io/projected/02540331-1ba6-45ee-824c-52e9b076f511-kube-api-access-rzpcj\") pod \"02540331-1ba6-45ee-824c-52e9b076f511\" (UID: \"02540331-1ba6-45ee-824c-52e9b076f511\") " Nov 28 10:37:14 crc kubenswrapper[4838]: I1128 10:37:14.593653 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/02540331-1ba6-45ee-824c-52e9b076f511-ceph\") pod \"02540331-1ba6-45ee-824c-52e9b076f511\" (UID: \"02540331-1ba6-45ee-824c-52e9b076f511\") " Nov 28 10:37:14 crc kubenswrapper[4838]: I1128 10:37:14.593709 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/02540331-1ba6-45ee-824c-52e9b076f511-inventory\") pod \"02540331-1ba6-45ee-824c-52e9b076f511\" (UID: \"02540331-1ba6-45ee-824c-52e9b076f511\") " Nov 28 10:37:14 crc kubenswrapper[4838]: I1128 10:37:14.593809 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/02540331-1ba6-45ee-824c-52e9b076f511-ssh-key\") pod \"02540331-1ba6-45ee-824c-52e9b076f511\" (UID: \"02540331-1ba6-45ee-824c-52e9b076f511\") " Nov 28 10:37:14 crc kubenswrapper[4838]: I1128 10:37:14.606156 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02540331-1ba6-45ee-824c-52e9b076f511-ceph" (OuterVolumeSpecName: "ceph") pod "02540331-1ba6-45ee-824c-52e9b076f511" (UID: "02540331-1ba6-45ee-824c-52e9b076f511"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:37:14 crc kubenswrapper[4838]: I1128 10:37:14.606436 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02540331-1ba6-45ee-824c-52e9b076f511-kube-api-access-rzpcj" (OuterVolumeSpecName: "kube-api-access-rzpcj") pod "02540331-1ba6-45ee-824c-52e9b076f511" (UID: "02540331-1ba6-45ee-824c-52e9b076f511"). InnerVolumeSpecName "kube-api-access-rzpcj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:37:14 crc kubenswrapper[4838]: I1128 10:37:14.623500 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02540331-1ba6-45ee-824c-52e9b076f511-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "02540331-1ba6-45ee-824c-52e9b076f511" (UID: "02540331-1ba6-45ee-824c-52e9b076f511"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:37:14 crc kubenswrapper[4838]: I1128 10:37:14.626731 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02540331-1ba6-45ee-824c-52e9b076f511-inventory" (OuterVolumeSpecName: "inventory") pod "02540331-1ba6-45ee-824c-52e9b076f511" (UID: "02540331-1ba6-45ee-824c-52e9b076f511"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:37:14 crc kubenswrapper[4838]: I1128 10:37:14.697201 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzpcj\" (UniqueName: \"kubernetes.io/projected/02540331-1ba6-45ee-824c-52e9b076f511-kube-api-access-rzpcj\") on node \"crc\" DevicePath \"\"" Nov 28 10:37:14 crc kubenswrapper[4838]: I1128 10:37:14.697797 4838 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/02540331-1ba6-45ee-824c-52e9b076f511-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 10:37:14 crc kubenswrapper[4838]: I1128 10:37:14.697829 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/02540331-1ba6-45ee-824c-52e9b076f511-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 10:37:14 crc kubenswrapper[4838]: I1128 10:37:14.697848 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/02540331-1ba6-45ee-824c-52e9b076f511-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.053568 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn" event={"ID":"02540331-1ba6-45ee-824c-52e9b076f511","Type":"ContainerDied","Data":"cc5111c3853f8fad6d7430f9f2c6f36839cd5f8831dab1fa291226edee83adba"} Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.053621 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cc5111c3853f8fad6d7430f9f2c6f36839cd5f8831dab1fa291226edee83adba" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.053637 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ht9vn" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.183213 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj"] Nov 28 10:37:15 crc kubenswrapper[4838]: E1128 10:37:15.183811 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02540331-1ba6-45ee-824c-52e9b076f511" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.183831 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="02540331-1ba6-45ee-824c-52e9b076f511" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.184079 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="02540331-1ba6-45ee-824c-52e9b076f511" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.184835 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.191651 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.191791 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.191842 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.192347 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.194217 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj"] Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.194794 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.313021 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj\" (UID: \"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.313072 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj\" (UID: \"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.313866 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj\" (UID: \"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.313919 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sn29j\" (UniqueName: \"kubernetes.io/projected/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-kube-api-access-sn29j\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj\" (UID: \"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.416304 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj\" (UID: \"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.416509 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-sn29j\" (UniqueName: \"kubernetes.io/projected/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-kube-api-access-sn29j\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj\" (UID: \"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.416872 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj\" (UID: \"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.416930 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj\" (UID: \"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.425148 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj\" (UID: \"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.427871 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj\" (UID: \"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.430758 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj\" (UID: \"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.439686 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sn29j\" (UniqueName: \"kubernetes.io/projected/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-kube-api-access-sn29j\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj\" (UID: \"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj" Nov 28 10:37:15 crc kubenswrapper[4838]: I1128 10:37:15.505224 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj" Nov 28 10:37:16 crc kubenswrapper[4838]: I1128 10:37:16.119774 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj"] Nov 28 10:37:16 crc kubenswrapper[4838]: W1128 10:37:16.122302 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod882b8dc3_77a7_42d9_a380_c9e27ff2a3e0.slice/crio-6510e8a86b27021a125a3a82697da372b47e6160f785f5a8b98b44f0c2fd1316 WatchSource:0}: Error finding container 6510e8a86b27021a125a3a82697da372b47e6160f785f5a8b98b44f0c2fd1316: Status 404 returned error can't find the container with id 6510e8a86b27021a125a3a82697da372b47e6160f785f5a8b98b44f0c2fd1316 Nov 28 10:37:17 crc kubenswrapper[4838]: I1128 10:37:17.077457 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj" event={"ID":"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0","Type":"ContainerStarted","Data":"416bd9e24bc0e7b226c27bb3e4b6f6b94ffcb386599f473bd67fcf972556bfb6"} Nov 28 10:37:17 crc kubenswrapper[4838]: I1128 10:37:17.079518 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj" event={"ID":"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0","Type":"ContainerStarted","Data":"6510e8a86b27021a125a3a82697da372b47e6160f785f5a8b98b44f0c2fd1316"} Nov 28 10:37:17 crc kubenswrapper[4838]: I1128 10:37:17.094347 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj" podStartSLOduration=1.633475297 podStartE2EDuration="2.094312678s" podCreationTimestamp="2025-11-28 10:37:15 +0000 UTC" firstStartedPulling="2025-11-28 10:37:16.125202548 +0000 UTC m=+2407.824176728" lastFinishedPulling="2025-11-28 10:37:16.586039929 +0000 UTC m=+2408.285014109" observedRunningTime="2025-11-28 10:37:17.09400276 +0000 UTC m=+2408.792976980" watchObservedRunningTime="2025-11-28 10:37:17.094312678 +0000 UTC m=+2408.793286888" Nov 28 10:37:21 crc kubenswrapper[4838]: I1128 10:37:21.124934 4838 generic.go:334] "Generic (PLEG): container finished" podID="882b8dc3-77a7-42d9-a380-c9e27ff2a3e0" containerID="416bd9e24bc0e7b226c27bb3e4b6f6b94ffcb386599f473bd67fcf972556bfb6" exitCode=0 Nov 28 10:37:21 crc kubenswrapper[4838]: I1128 10:37:21.125018 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj" event={"ID":"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0","Type":"ContainerDied","Data":"416bd9e24bc0e7b226c27bb3e4b6f6b94ffcb386599f473bd67fcf972556bfb6"} Nov 28 10:37:22 crc kubenswrapper[4838]: I1128 10:37:22.570111 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj" Nov 28 10:37:22 crc kubenswrapper[4838]: I1128 10:37:22.765166 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-ssh-key\") pod \"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0\" (UID: \"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0\") " Nov 28 10:37:22 crc kubenswrapper[4838]: I1128 10:37:22.765277 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-ceph\") pod \"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0\" (UID: \"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0\") " Nov 28 10:37:22 crc kubenswrapper[4838]: I1128 10:37:22.765458 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sn29j\" (UniqueName: \"kubernetes.io/projected/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-kube-api-access-sn29j\") pod \"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0\" (UID: \"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0\") " Nov 28 10:37:22 crc kubenswrapper[4838]: I1128 10:37:22.765814 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-inventory\") pod \"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0\" (UID: \"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0\") " Nov 28 10:37:22 crc kubenswrapper[4838]: I1128 10:37:22.774761 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-kube-api-access-sn29j" (OuterVolumeSpecName: "kube-api-access-sn29j") pod "882b8dc3-77a7-42d9-a380-c9e27ff2a3e0" (UID: "882b8dc3-77a7-42d9-a380-c9e27ff2a3e0"). InnerVolumeSpecName "kube-api-access-sn29j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:37:22 crc kubenswrapper[4838]: I1128 10:37:22.776261 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-ceph" (OuterVolumeSpecName: "ceph") pod "882b8dc3-77a7-42d9-a380-c9e27ff2a3e0" (UID: "882b8dc3-77a7-42d9-a380-c9e27ff2a3e0"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:37:22 crc kubenswrapper[4838]: I1128 10:37:22.794163 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "882b8dc3-77a7-42d9-a380-c9e27ff2a3e0" (UID: "882b8dc3-77a7-42d9-a380-c9e27ff2a3e0"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:37:22 crc kubenswrapper[4838]: I1128 10:37:22.815119 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-inventory" (OuterVolumeSpecName: "inventory") pod "882b8dc3-77a7-42d9-a380-c9e27ff2a3e0" (UID: "882b8dc3-77a7-42d9-a380-c9e27ff2a3e0"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:37:22 crc kubenswrapper[4838]: I1128 10:37:22.869771 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 10:37:22 crc kubenswrapper[4838]: I1128 10:37:22.869848 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:37:22 crc kubenswrapper[4838]: I1128 10:37:22.869875 4838 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 10:37:22 crc kubenswrapper[4838]: I1128 10:37:22.869904 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sn29j\" (UniqueName: \"kubernetes.io/projected/882b8dc3-77a7-42d9-a380-c9e27ff2a3e0-kube-api-access-sn29j\") on node \"crc\" DevicePath \"\"" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.150011 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj" event={"ID":"882b8dc3-77a7-42d9-a380-c9e27ff2a3e0","Type":"ContainerDied","Data":"6510e8a86b27021a125a3a82697da372b47e6160f785f5a8b98b44f0c2fd1316"} Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.150401 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6510e8a86b27021a125a3a82697da372b47e6160f785f5a8b98b44f0c2fd1316" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.150104 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.244837 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp"] Nov 28 10:37:23 crc kubenswrapper[4838]: E1128 10:37:23.245196 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="882b8dc3-77a7-42d9-a380-c9e27ff2a3e0" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.245217 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="882b8dc3-77a7-42d9-a380-c9e27ff2a3e0" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.245437 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="882b8dc3-77a7-42d9-a380-c9e27ff2a3e0" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.246089 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.248860 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.249300 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.256681 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.257047 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.257681 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.279860 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp"] Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.377985 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/345f52bd-a4c3-4f71-bd23-9141bc780bfb-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp\" (UID: \"345f52bd-a4c3-4f71-bd23-9141bc780bfb\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.378040 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ql2sr\" (UniqueName: \"kubernetes.io/projected/345f52bd-a4c3-4f71-bd23-9141bc780bfb-kube-api-access-ql2sr\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp\" (UID: \"345f52bd-a4c3-4f71-bd23-9141bc780bfb\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.378071 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/345f52bd-a4c3-4f71-bd23-9141bc780bfb-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp\" (UID: \"345f52bd-a4c3-4f71-bd23-9141bc780bfb\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.378525 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/345f52bd-a4c3-4f71-bd23-9141bc780bfb-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp\" (UID: \"345f52bd-a4c3-4f71-bd23-9141bc780bfb\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.481320 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/345f52bd-a4c3-4f71-bd23-9141bc780bfb-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp\" (UID: \"345f52bd-a4c3-4f71-bd23-9141bc780bfb\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.481432 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" 
(UniqueName: \"kubernetes.io/secret/345f52bd-a4c3-4f71-bd23-9141bc780bfb-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp\" (UID: \"345f52bd-a4c3-4f71-bd23-9141bc780bfb\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.481495 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ql2sr\" (UniqueName: \"kubernetes.io/projected/345f52bd-a4c3-4f71-bd23-9141bc780bfb-kube-api-access-ql2sr\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp\" (UID: \"345f52bd-a4c3-4f71-bd23-9141bc780bfb\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.481545 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/345f52bd-a4c3-4f71-bd23-9141bc780bfb-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp\" (UID: \"345f52bd-a4c3-4f71-bd23-9141bc780bfb\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.487895 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/345f52bd-a4c3-4f71-bd23-9141bc780bfb-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp\" (UID: \"345f52bd-a4c3-4f71-bd23-9141bc780bfb\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.488806 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/345f52bd-a4c3-4f71-bd23-9141bc780bfb-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp\" (UID: \"345f52bd-a4c3-4f71-bd23-9141bc780bfb\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.489533 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/345f52bd-a4c3-4f71-bd23-9141bc780bfb-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp\" (UID: \"345f52bd-a4c3-4f71-bd23-9141bc780bfb\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.509672 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ql2sr\" (UniqueName: \"kubernetes.io/projected/345f52bd-a4c3-4f71-bd23-9141bc780bfb-kube-api-access-ql2sr\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp\" (UID: \"345f52bd-a4c3-4f71-bd23-9141bc780bfb\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.562762 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp" Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.940457 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:37:23 crc kubenswrapper[4838]: I1128 10:37:23.940807 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:37:24 crc kubenswrapper[4838]: I1128 10:37:24.213850 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp"] Nov 28 10:37:25 crc kubenswrapper[4838]: I1128 10:37:25.181175 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp" event={"ID":"345f52bd-a4c3-4f71-bd23-9141bc780bfb","Type":"ContainerStarted","Data":"8a9c46af8ca88f7b84ae6fa536d24f18a8b6518b30db37e3b74388df678bb4ce"} Nov 28 10:37:26 crc kubenswrapper[4838]: I1128 10:37:26.195183 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp" event={"ID":"345f52bd-a4c3-4f71-bd23-9141bc780bfb","Type":"ContainerStarted","Data":"89c7d03a4a0c7c6266d9ffd0c9b459aa7111cb412e0ac31e7cbc06099211ce00"} Nov 28 10:37:26 crc kubenswrapper[4838]: I1128 10:37:26.230215 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp" podStartSLOduration=2.363121749 podStartE2EDuration="3.230186853s" podCreationTimestamp="2025-11-28 10:37:23 +0000 UTC" firstStartedPulling="2025-11-28 10:37:24.208936862 +0000 UTC m=+2415.907911042" lastFinishedPulling="2025-11-28 10:37:25.076001966 +0000 UTC m=+2416.774976146" observedRunningTime="2025-11-28 10:37:26.219858293 +0000 UTC m=+2417.918832473" watchObservedRunningTime="2025-11-28 10:37:26.230186853 +0000 UTC m=+2417.929161063" Nov 28 10:37:53 crc kubenswrapper[4838]: I1128 10:37:53.940106 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:37:53 crc kubenswrapper[4838]: I1128 10:37:53.940805 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:37:53 crc kubenswrapper[4838]: I1128 10:37:53.940867 4838 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 10:37:53 crc kubenswrapper[4838]: I1128 10:37:53.941953 4838 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7"} pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 10:37:53 crc kubenswrapper[4838]: I1128 10:37:53.942159 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" containerID="cri-o://eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7" gracePeriod=600 Nov 28 10:37:54 crc kubenswrapper[4838]: E1128 10:37:54.074447 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:37:54 crc kubenswrapper[4838]: I1128 10:37:54.544113 4838 generic.go:334] "Generic (PLEG): container finished" podID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7" exitCode=0 Nov 28 10:37:54 crc kubenswrapper[4838]: I1128 10:37:54.544194 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerDied","Data":"eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7"} Nov 28 10:37:54 crc kubenswrapper[4838]: I1128 10:37:54.544411 4838 scope.go:117] "RemoveContainer" containerID="5afd39ddd492746c0199d772a63313b45e117b315a570579a80048ab6e189d3f" Nov 28 10:37:54 crc kubenswrapper[4838]: I1128 10:37:54.545161 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7" Nov 28 10:37:54 crc kubenswrapper[4838]: E1128 10:37:54.545616 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:38:09 crc kubenswrapper[4838]: I1128 10:38:09.562974 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7" Nov 28 10:38:09 crc kubenswrapper[4838]: E1128 10:38:09.565687 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:38:17 crc kubenswrapper[4838]: I1128 10:38:17.816665 4838 generic.go:334] "Generic (PLEG): container finished" podID="345f52bd-a4c3-4f71-bd23-9141bc780bfb" containerID="89c7d03a4a0c7c6266d9ffd0c9b459aa7111cb412e0ac31e7cbc06099211ce00" exitCode=0 Nov 28 10:38:17 crc 
kubenswrapper[4838]: I1128 10:38:17.816780 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp" event={"ID":"345f52bd-a4c3-4f71-bd23-9141bc780bfb","Type":"ContainerDied","Data":"89c7d03a4a0c7c6266d9ffd0c9b459aa7111cb412e0ac31e7cbc06099211ce00"} Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.331503 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp" Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.499556 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/345f52bd-a4c3-4f71-bd23-9141bc780bfb-ceph\") pod \"345f52bd-a4c3-4f71-bd23-9141bc780bfb\" (UID: \"345f52bd-a4c3-4f71-bd23-9141bc780bfb\") " Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.500294 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/345f52bd-a4c3-4f71-bd23-9141bc780bfb-inventory\") pod \"345f52bd-a4c3-4f71-bd23-9141bc780bfb\" (UID: \"345f52bd-a4c3-4f71-bd23-9141bc780bfb\") " Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.501164 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ql2sr\" (UniqueName: \"kubernetes.io/projected/345f52bd-a4c3-4f71-bd23-9141bc780bfb-kube-api-access-ql2sr\") pod \"345f52bd-a4c3-4f71-bd23-9141bc780bfb\" (UID: \"345f52bd-a4c3-4f71-bd23-9141bc780bfb\") " Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.501582 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/345f52bd-a4c3-4f71-bd23-9141bc780bfb-ssh-key\") pod \"345f52bd-a4c3-4f71-bd23-9141bc780bfb\" (UID: \"345f52bd-a4c3-4f71-bd23-9141bc780bfb\") " Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.507645 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/345f52bd-a4c3-4f71-bd23-9141bc780bfb-kube-api-access-ql2sr" (OuterVolumeSpecName: "kube-api-access-ql2sr") pod "345f52bd-a4c3-4f71-bd23-9141bc780bfb" (UID: "345f52bd-a4c3-4f71-bd23-9141bc780bfb"). InnerVolumeSpecName "kube-api-access-ql2sr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.508292 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/345f52bd-a4c3-4f71-bd23-9141bc780bfb-ceph" (OuterVolumeSpecName: "ceph") pod "345f52bd-a4c3-4f71-bd23-9141bc780bfb" (UID: "345f52bd-a4c3-4f71-bd23-9141bc780bfb"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.531291 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/345f52bd-a4c3-4f71-bd23-9141bc780bfb-inventory" (OuterVolumeSpecName: "inventory") pod "345f52bd-a4c3-4f71-bd23-9141bc780bfb" (UID: "345f52bd-a4c3-4f71-bd23-9141bc780bfb"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.532021 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/345f52bd-a4c3-4f71-bd23-9141bc780bfb-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "345f52bd-a4c3-4f71-bd23-9141bc780bfb" (UID: "345f52bd-a4c3-4f71-bd23-9141bc780bfb"). 
InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.604905 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ql2sr\" (UniqueName: \"kubernetes.io/projected/345f52bd-a4c3-4f71-bd23-9141bc780bfb-kube-api-access-ql2sr\") on node \"crc\" DevicePath \"\"" Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.606208 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/345f52bd-a4c3-4f71-bd23-9141bc780bfb-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.606394 4838 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/345f52bd-a4c3-4f71-bd23-9141bc780bfb-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.606515 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/345f52bd-a4c3-4f71-bd23-9141bc780bfb-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.841159 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp" event={"ID":"345f52bd-a4c3-4f71-bd23-9141bc780bfb","Type":"ContainerDied","Data":"8a9c46af8ca88f7b84ae6fa536d24f18a8b6518b30db37e3b74388df678bb4ce"} Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.841210 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8a9c46af8ca88f7b84ae6fa536d24f18a8b6518b30db37e3b74388df678bb4ce" Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.841263 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp" Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.983740 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-zfmqh"] Nov 28 10:38:19 crc kubenswrapper[4838]: E1128 10:38:19.984158 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="345f52bd-a4c3-4f71-bd23-9141bc780bfb" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.984178 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="345f52bd-a4c3-4f71-bd23-9141bc780bfb" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.984411 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="345f52bd-a4c3-4f71-bd23-9141bc780bfb" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.985149 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-zfmqh" Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.988523 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.988704 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.989075 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn" Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.989309 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 28 10:38:19 crc kubenswrapper[4838]: I1128 10:38:19.989472 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 10:38:20 crc kubenswrapper[4838]: I1128 10:38:20.019128 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-zfmqh"] Nov 28 10:38:20 crc kubenswrapper[4838]: I1128 10:38:20.119933 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fcf22c85-ed90-47d9-9131-365fa3620686-ceph\") pod \"ssh-known-hosts-edpm-deployment-zfmqh\" (UID: \"fcf22c85-ed90-47d9-9131-365fa3620686\") " pod="openstack/ssh-known-hosts-edpm-deployment-zfmqh" Nov 28 10:38:20 crc kubenswrapper[4838]: I1128 10:38:20.120320 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fcf22c85-ed90-47d9-9131-365fa3620686-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-zfmqh\" (UID: \"fcf22c85-ed90-47d9-9131-365fa3620686\") " pod="openstack/ssh-known-hosts-edpm-deployment-zfmqh" Nov 28 10:38:20 crc kubenswrapper[4838]: I1128 10:38:20.120469 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/fcf22c85-ed90-47d9-9131-365fa3620686-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-zfmqh\" (UID: \"fcf22c85-ed90-47d9-9131-365fa3620686\") " pod="openstack/ssh-known-hosts-edpm-deployment-zfmqh" Nov 28 10:38:20 crc kubenswrapper[4838]: I1128 10:38:20.120570 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jdcq\" (UniqueName: \"kubernetes.io/projected/fcf22c85-ed90-47d9-9131-365fa3620686-kube-api-access-6jdcq\") pod \"ssh-known-hosts-edpm-deployment-zfmqh\" (UID: \"fcf22c85-ed90-47d9-9131-365fa3620686\") " pod="openstack/ssh-known-hosts-edpm-deployment-zfmqh" Nov 28 10:38:20 crc kubenswrapper[4838]: I1128 10:38:20.222666 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/fcf22c85-ed90-47d9-9131-365fa3620686-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-zfmqh\" (UID: \"fcf22c85-ed90-47d9-9131-365fa3620686\") " pod="openstack/ssh-known-hosts-edpm-deployment-zfmqh" Nov 28 10:38:20 crc kubenswrapper[4838]: I1128 10:38:20.222762 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jdcq\" (UniqueName: \"kubernetes.io/projected/fcf22c85-ed90-47d9-9131-365fa3620686-kube-api-access-6jdcq\") pod \"ssh-known-hosts-edpm-deployment-zfmqh\" 
(UID: \"fcf22c85-ed90-47d9-9131-365fa3620686\") " pod="openstack/ssh-known-hosts-edpm-deployment-zfmqh" Nov 28 10:38:20 crc kubenswrapper[4838]: I1128 10:38:20.222963 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fcf22c85-ed90-47d9-9131-365fa3620686-ceph\") pod \"ssh-known-hosts-edpm-deployment-zfmqh\" (UID: \"fcf22c85-ed90-47d9-9131-365fa3620686\") " pod="openstack/ssh-known-hosts-edpm-deployment-zfmqh" Nov 28 10:38:20 crc kubenswrapper[4838]: I1128 10:38:20.223075 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fcf22c85-ed90-47d9-9131-365fa3620686-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-zfmqh\" (UID: \"fcf22c85-ed90-47d9-9131-365fa3620686\") " pod="openstack/ssh-known-hosts-edpm-deployment-zfmqh" Nov 28 10:38:20 crc kubenswrapper[4838]: I1128 10:38:20.228540 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fcf22c85-ed90-47d9-9131-365fa3620686-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-zfmqh\" (UID: \"fcf22c85-ed90-47d9-9131-365fa3620686\") " pod="openstack/ssh-known-hosts-edpm-deployment-zfmqh" Nov 28 10:38:20 crc kubenswrapper[4838]: I1128 10:38:20.231816 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/fcf22c85-ed90-47d9-9131-365fa3620686-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-zfmqh\" (UID: \"fcf22c85-ed90-47d9-9131-365fa3620686\") " pod="openstack/ssh-known-hosts-edpm-deployment-zfmqh" Nov 28 10:38:20 crc kubenswrapper[4838]: I1128 10:38:20.237900 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fcf22c85-ed90-47d9-9131-365fa3620686-ceph\") pod \"ssh-known-hosts-edpm-deployment-zfmqh\" (UID: \"fcf22c85-ed90-47d9-9131-365fa3620686\") " pod="openstack/ssh-known-hosts-edpm-deployment-zfmqh" Nov 28 10:38:20 crc kubenswrapper[4838]: I1128 10:38:20.252827 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jdcq\" (UniqueName: \"kubernetes.io/projected/fcf22c85-ed90-47d9-9131-365fa3620686-kube-api-access-6jdcq\") pod \"ssh-known-hosts-edpm-deployment-zfmqh\" (UID: \"fcf22c85-ed90-47d9-9131-365fa3620686\") " pod="openstack/ssh-known-hosts-edpm-deployment-zfmqh" Nov 28 10:38:20 crc kubenswrapper[4838]: I1128 10:38:20.312359 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-zfmqh" Nov 28 10:38:20 crc kubenswrapper[4838]: I1128 10:38:20.562045 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7" Nov 28 10:38:20 crc kubenswrapper[4838]: E1128 10:38:20.562538 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:38:20 crc kubenswrapper[4838]: I1128 10:38:20.945513 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-zfmqh"] Nov 28 10:38:21 crc kubenswrapper[4838]: I1128 10:38:21.862565 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-zfmqh" event={"ID":"fcf22c85-ed90-47d9-9131-365fa3620686","Type":"ContainerStarted","Data":"d812910f931e396671a7b1938c2bdc2d2cbb0c8e1b55a239d46f91ca58890f09"} Nov 28 10:38:21 crc kubenswrapper[4838]: I1128 10:38:21.863112 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-zfmqh" event={"ID":"fcf22c85-ed90-47d9-9131-365fa3620686","Type":"ContainerStarted","Data":"a9679c43849204da6765ce83f382ed21332949a461cecaaf219a728d87743dc4"} Nov 28 10:38:21 crc kubenswrapper[4838]: I1128 10:38:21.897369 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-zfmqh" podStartSLOduration=2.403577887 podStartE2EDuration="2.897338972s" podCreationTimestamp="2025-11-28 10:38:19 +0000 UTC" firstStartedPulling="2025-11-28 10:38:20.956689476 +0000 UTC m=+2472.655663686" lastFinishedPulling="2025-11-28 10:38:21.450450561 +0000 UTC m=+2473.149424771" observedRunningTime="2025-11-28 10:38:21.886877798 +0000 UTC m=+2473.585852018" watchObservedRunningTime="2025-11-28 10:38:21.897338972 +0000 UTC m=+2473.596313172" Nov 28 10:38:33 crc kubenswrapper[4838]: I1128 10:38:33.562666 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7" Nov 28 10:38:33 crc kubenswrapper[4838]: E1128 10:38:33.563699 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:38:34 crc kubenswrapper[4838]: I1128 10:38:34.002477 4838 generic.go:334] "Generic (PLEG): container finished" podID="fcf22c85-ed90-47d9-9131-365fa3620686" containerID="d812910f931e396671a7b1938c2bdc2d2cbb0c8e1b55a239d46f91ca58890f09" exitCode=0 Nov 28 10:38:34 crc kubenswrapper[4838]: I1128 10:38:34.002551 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-zfmqh" event={"ID":"fcf22c85-ed90-47d9-9131-365fa3620686","Type":"ContainerDied","Data":"d812910f931e396671a7b1938c2bdc2d2cbb0c8e1b55a239d46f91ca58890f09"} Nov 28 10:38:35 crc kubenswrapper[4838]: I1128 10:38:35.532963 4838 util.go:48] "No ready sandbox 
for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-zfmqh" Nov 28 10:38:35 crc kubenswrapper[4838]: I1128 10:38:35.596740 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6jdcq\" (UniqueName: \"kubernetes.io/projected/fcf22c85-ed90-47d9-9131-365fa3620686-kube-api-access-6jdcq\") pod \"fcf22c85-ed90-47d9-9131-365fa3620686\" (UID: \"fcf22c85-ed90-47d9-9131-365fa3620686\") " Nov 28 10:38:35 crc kubenswrapper[4838]: I1128 10:38:35.596897 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/fcf22c85-ed90-47d9-9131-365fa3620686-inventory-0\") pod \"fcf22c85-ed90-47d9-9131-365fa3620686\" (UID: \"fcf22c85-ed90-47d9-9131-365fa3620686\") " Nov 28 10:38:35 crc kubenswrapper[4838]: I1128 10:38:35.597098 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fcf22c85-ed90-47d9-9131-365fa3620686-ssh-key-openstack-edpm-ipam\") pod \"fcf22c85-ed90-47d9-9131-365fa3620686\" (UID: \"fcf22c85-ed90-47d9-9131-365fa3620686\") " Nov 28 10:38:35 crc kubenswrapper[4838]: I1128 10:38:35.597221 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fcf22c85-ed90-47d9-9131-365fa3620686-ceph\") pod \"fcf22c85-ed90-47d9-9131-365fa3620686\" (UID: \"fcf22c85-ed90-47d9-9131-365fa3620686\") " Nov 28 10:38:35 crc kubenswrapper[4838]: I1128 10:38:35.604206 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fcf22c85-ed90-47d9-9131-365fa3620686-kube-api-access-6jdcq" (OuterVolumeSpecName: "kube-api-access-6jdcq") pod "fcf22c85-ed90-47d9-9131-365fa3620686" (UID: "fcf22c85-ed90-47d9-9131-365fa3620686"). InnerVolumeSpecName "kube-api-access-6jdcq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:38:35 crc kubenswrapper[4838]: I1128 10:38:35.604753 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fcf22c85-ed90-47d9-9131-365fa3620686-ceph" (OuterVolumeSpecName: "ceph") pod "fcf22c85-ed90-47d9-9131-365fa3620686" (UID: "fcf22c85-ed90-47d9-9131-365fa3620686"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:38:35 crc kubenswrapper[4838]: I1128 10:38:35.640939 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fcf22c85-ed90-47d9-9131-365fa3620686-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "fcf22c85-ed90-47d9-9131-365fa3620686" (UID: "fcf22c85-ed90-47d9-9131-365fa3620686"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:38:35 crc kubenswrapper[4838]: I1128 10:38:35.646600 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fcf22c85-ed90-47d9-9131-365fa3620686-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "fcf22c85-ed90-47d9-9131-365fa3620686" (UID: "fcf22c85-ed90-47d9-9131-365fa3620686"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:38:35 crc kubenswrapper[4838]: I1128 10:38:35.700485 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fcf22c85-ed90-47d9-9131-365fa3620686-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 28 10:38:35 crc kubenswrapper[4838]: I1128 10:38:35.700875 4838 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fcf22c85-ed90-47d9-9131-365fa3620686-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 10:38:35 crc kubenswrapper[4838]: I1128 10:38:35.700890 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6jdcq\" (UniqueName: \"kubernetes.io/projected/fcf22c85-ed90-47d9-9131-365fa3620686-kube-api-access-6jdcq\") on node \"crc\" DevicePath \"\"" Nov 28 10:38:35 crc kubenswrapper[4838]: I1128 10:38:35.700901 4838 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/fcf22c85-ed90-47d9-9131-365fa3620686-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.028651 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-zfmqh" event={"ID":"fcf22c85-ed90-47d9-9131-365fa3620686","Type":"ContainerDied","Data":"a9679c43849204da6765ce83f382ed21332949a461cecaaf219a728d87743dc4"} Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.028752 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a9679c43849204da6765ce83f382ed21332949a461cecaaf219a728d87743dc4" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.028791 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-zfmqh" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.128171 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25"] Nov 28 10:38:36 crc kubenswrapper[4838]: E1128 10:38:36.128659 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcf22c85-ed90-47d9-9131-365fa3620686" containerName="ssh-known-hosts-edpm-deployment" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.128686 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcf22c85-ed90-47d9-9131-365fa3620686" containerName="ssh-known-hosts-edpm-deployment" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.128963 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="fcf22c85-ed90-47d9-9131-365fa3620686" containerName="ssh-known-hosts-edpm-deployment" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.130425 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.134866 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.135073 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.135237 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.135517 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.135813 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.145341 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25"] Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.210337 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ea469a90-76a4-4712-90a7-af038c331ee2-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-f8h25\" (UID: \"ea469a90-76a4-4712-90a7-af038c331ee2\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.210465 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ea469a90-76a4-4712-90a7-af038c331ee2-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-f8h25\" (UID: \"ea469a90-76a4-4712-90a7-af038c331ee2\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.210498 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ea469a90-76a4-4712-90a7-af038c331ee2-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-f8h25\" (UID: \"ea469a90-76a4-4712-90a7-af038c331ee2\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.210572 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbfwl\" (UniqueName: \"kubernetes.io/projected/ea469a90-76a4-4712-90a7-af038c331ee2-kube-api-access-pbfwl\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-f8h25\" (UID: \"ea469a90-76a4-4712-90a7-af038c331ee2\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.312504 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ea469a90-76a4-4712-90a7-af038c331ee2-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-f8h25\" (UID: \"ea469a90-76a4-4712-90a7-af038c331ee2\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.312745 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: 
\"kubernetes.io/secret/ea469a90-76a4-4712-90a7-af038c331ee2-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-f8h25\" (UID: \"ea469a90-76a4-4712-90a7-af038c331ee2\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.312815 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ea469a90-76a4-4712-90a7-af038c331ee2-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-f8h25\" (UID: \"ea469a90-76a4-4712-90a7-af038c331ee2\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.312950 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbfwl\" (UniqueName: \"kubernetes.io/projected/ea469a90-76a4-4712-90a7-af038c331ee2-kube-api-access-pbfwl\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-f8h25\" (UID: \"ea469a90-76a4-4712-90a7-af038c331ee2\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.318499 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ea469a90-76a4-4712-90a7-af038c331ee2-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-f8h25\" (UID: \"ea469a90-76a4-4712-90a7-af038c331ee2\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.319512 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ea469a90-76a4-4712-90a7-af038c331ee2-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-f8h25\" (UID: \"ea469a90-76a4-4712-90a7-af038c331ee2\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.326818 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ea469a90-76a4-4712-90a7-af038c331ee2-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-f8h25\" (UID: \"ea469a90-76a4-4712-90a7-af038c331ee2\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.329558 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbfwl\" (UniqueName: \"kubernetes.io/projected/ea469a90-76a4-4712-90a7-af038c331ee2-kube-api-access-pbfwl\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-f8h25\" (UID: \"ea469a90-76a4-4712-90a7-af038c331ee2\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25" Nov 28 10:38:36 crc kubenswrapper[4838]: I1128 10:38:36.454654 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25" Nov 28 10:38:37 crc kubenswrapper[4838]: I1128 10:38:37.030410 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25"] Nov 28 10:38:37 crc kubenswrapper[4838]: W1128 10:38:37.039426 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podea469a90_76a4_4712_90a7_af038c331ee2.slice/crio-5eac76d1276354247f624e1cde86dfe05c342bec649e07cd8ba0b2a209633435 WatchSource:0}: Error finding container 5eac76d1276354247f624e1cde86dfe05c342bec649e07cd8ba0b2a209633435: Status 404 returned error can't find the container with id 5eac76d1276354247f624e1cde86dfe05c342bec649e07cd8ba0b2a209633435 Nov 28 10:38:38 crc kubenswrapper[4838]: I1128 10:38:38.056204 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25" event={"ID":"ea469a90-76a4-4712-90a7-af038c331ee2","Type":"ContainerStarted","Data":"5eac76d1276354247f624e1cde86dfe05c342bec649e07cd8ba0b2a209633435"} Nov 28 10:38:39 crc kubenswrapper[4838]: I1128 10:38:39.066040 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25" event={"ID":"ea469a90-76a4-4712-90a7-af038c331ee2","Type":"ContainerStarted","Data":"ab471809d0785a19856ba5c811076c978141a25c05311c97b87956c2fcc764ee"} Nov 28 10:38:39 crc kubenswrapper[4838]: I1128 10:38:39.096276 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25" podStartSLOduration=2.067348486 podStartE2EDuration="3.096262322s" podCreationTimestamp="2025-11-28 10:38:36 +0000 UTC" firstStartedPulling="2025-11-28 10:38:37.042332015 +0000 UTC m=+2488.741306225" lastFinishedPulling="2025-11-28 10:38:38.071245861 +0000 UTC m=+2489.770220061" observedRunningTime="2025-11-28 10:38:39.09216955 +0000 UTC m=+2490.791143720" watchObservedRunningTime="2025-11-28 10:38:39.096262322 +0000 UTC m=+2490.795236492" Nov 28 10:38:44 crc kubenswrapper[4838]: I1128 10:38:44.562549 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7" Nov 28 10:38:44 crc kubenswrapper[4838]: E1128 10:38:44.564497 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:38:47 crc kubenswrapper[4838]: I1128 10:38:47.154314 4838 generic.go:334] "Generic (PLEG): container finished" podID="ea469a90-76a4-4712-90a7-af038c331ee2" containerID="ab471809d0785a19856ba5c811076c978141a25c05311c97b87956c2fcc764ee" exitCode=0 Nov 28 10:38:47 crc kubenswrapper[4838]: I1128 10:38:47.154426 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25" event={"ID":"ea469a90-76a4-4712-90a7-af038c331ee2","Type":"ContainerDied","Data":"ab471809d0785a19856ba5c811076c978141a25c05311c97b87956c2fcc764ee"} Nov 28 10:38:48 crc kubenswrapper[4838]: I1128 10:38:48.607676 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25" Nov 28 10:38:48 crc kubenswrapper[4838]: I1128 10:38:48.676552 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ea469a90-76a4-4712-90a7-af038c331ee2-ceph\") pod \"ea469a90-76a4-4712-90a7-af038c331ee2\" (UID: \"ea469a90-76a4-4712-90a7-af038c331ee2\") " Nov 28 10:38:48 crc kubenswrapper[4838]: I1128 10:38:48.676962 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ea469a90-76a4-4712-90a7-af038c331ee2-inventory\") pod \"ea469a90-76a4-4712-90a7-af038c331ee2\" (UID: \"ea469a90-76a4-4712-90a7-af038c331ee2\") " Nov 28 10:38:48 crc kubenswrapper[4838]: I1128 10:38:48.677127 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pbfwl\" (UniqueName: \"kubernetes.io/projected/ea469a90-76a4-4712-90a7-af038c331ee2-kube-api-access-pbfwl\") pod \"ea469a90-76a4-4712-90a7-af038c331ee2\" (UID: \"ea469a90-76a4-4712-90a7-af038c331ee2\") " Nov 28 10:38:48 crc kubenswrapper[4838]: I1128 10:38:48.677228 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ea469a90-76a4-4712-90a7-af038c331ee2-ssh-key\") pod \"ea469a90-76a4-4712-90a7-af038c331ee2\" (UID: \"ea469a90-76a4-4712-90a7-af038c331ee2\") " Nov 28 10:38:48 crc kubenswrapper[4838]: I1128 10:38:48.685887 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea469a90-76a4-4712-90a7-af038c331ee2-ceph" (OuterVolumeSpecName: "ceph") pod "ea469a90-76a4-4712-90a7-af038c331ee2" (UID: "ea469a90-76a4-4712-90a7-af038c331ee2"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:38:48 crc kubenswrapper[4838]: I1128 10:38:48.685894 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea469a90-76a4-4712-90a7-af038c331ee2-kube-api-access-pbfwl" (OuterVolumeSpecName: "kube-api-access-pbfwl") pod "ea469a90-76a4-4712-90a7-af038c331ee2" (UID: "ea469a90-76a4-4712-90a7-af038c331ee2"). InnerVolumeSpecName "kube-api-access-pbfwl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:38:48 crc kubenswrapper[4838]: I1128 10:38:48.701104 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea469a90-76a4-4712-90a7-af038c331ee2-inventory" (OuterVolumeSpecName: "inventory") pod "ea469a90-76a4-4712-90a7-af038c331ee2" (UID: "ea469a90-76a4-4712-90a7-af038c331ee2"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:38:48 crc kubenswrapper[4838]: I1128 10:38:48.701996 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea469a90-76a4-4712-90a7-af038c331ee2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ea469a90-76a4-4712-90a7-af038c331ee2" (UID: "ea469a90-76a4-4712-90a7-af038c331ee2"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:38:48 crc kubenswrapper[4838]: I1128 10:38:48.779982 4838 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ea469a90-76a4-4712-90a7-af038c331ee2-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 10:38:48 crc kubenswrapper[4838]: I1128 10:38:48.780022 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ea469a90-76a4-4712-90a7-af038c331ee2-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 10:38:48 crc kubenswrapper[4838]: I1128 10:38:48.780036 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pbfwl\" (UniqueName: \"kubernetes.io/projected/ea469a90-76a4-4712-90a7-af038c331ee2-kube-api-access-pbfwl\") on node \"crc\" DevicePath \"\"" Nov 28 10:38:48 crc kubenswrapper[4838]: I1128 10:38:48.780047 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ea469a90-76a4-4712-90a7-af038c331ee2-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.179220 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25" event={"ID":"ea469a90-76a4-4712-90a7-af038c331ee2","Type":"ContainerDied","Data":"5eac76d1276354247f624e1cde86dfe05c342bec649e07cd8ba0b2a209633435"} Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.179278 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5eac76d1276354247f624e1cde86dfe05c342bec649e07cd8ba0b2a209633435" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.179306 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-f8h25" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.299793 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c"] Nov 28 10:38:49 crc kubenswrapper[4838]: E1128 10:38:49.300402 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea469a90-76a4-4712-90a7-af038c331ee2" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.300435 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea469a90-76a4-4712-90a7-af038c331ee2" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.300867 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea469a90-76a4-4712-90a7-af038c331ee2" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.301880 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.308025 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.308219 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.308802 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.308920 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.313211 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.318796 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c"] Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.391785 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/801ebab7-c9de-423e-910d-32e56be5cc7b-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c\" (UID: \"801ebab7-c9de-423e-910d-32e56be5cc7b\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.391837 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jffbh\" (UniqueName: \"kubernetes.io/projected/801ebab7-c9de-423e-910d-32e56be5cc7b-kube-api-access-jffbh\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c\" (UID: \"801ebab7-c9de-423e-910d-32e56be5cc7b\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.391891 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/801ebab7-c9de-423e-910d-32e56be5cc7b-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c\" (UID: \"801ebab7-c9de-423e-910d-32e56be5cc7b\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.391947 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/801ebab7-c9de-423e-910d-32e56be5cc7b-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c\" (UID: \"801ebab7-c9de-423e-910d-32e56be5cc7b\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.493857 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/801ebab7-c9de-423e-910d-32e56be5cc7b-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c\" (UID: \"801ebab7-c9de-423e-910d-32e56be5cc7b\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.493951 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jffbh\" (UniqueName: 
\"kubernetes.io/projected/801ebab7-c9de-423e-910d-32e56be5cc7b-kube-api-access-jffbh\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c\" (UID: \"801ebab7-c9de-423e-910d-32e56be5cc7b\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.494041 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/801ebab7-c9de-423e-910d-32e56be5cc7b-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c\" (UID: \"801ebab7-c9de-423e-910d-32e56be5cc7b\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.494154 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/801ebab7-c9de-423e-910d-32e56be5cc7b-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c\" (UID: \"801ebab7-c9de-423e-910d-32e56be5cc7b\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.499952 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/801ebab7-c9de-423e-910d-32e56be5cc7b-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c\" (UID: \"801ebab7-c9de-423e-910d-32e56be5cc7b\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.500162 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/801ebab7-c9de-423e-910d-32e56be5cc7b-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c\" (UID: \"801ebab7-c9de-423e-910d-32e56be5cc7b\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.503837 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/801ebab7-c9de-423e-910d-32e56be5cc7b-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c\" (UID: \"801ebab7-c9de-423e-910d-32e56be5cc7b\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.515930 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jffbh\" (UniqueName: \"kubernetes.io/projected/801ebab7-c9de-423e-910d-32e56be5cc7b-kube-api-access-jffbh\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c\" (UID: \"801ebab7-c9de-423e-910d-32e56be5cc7b\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c" Nov 28 10:38:49 crc kubenswrapper[4838]: I1128 10:38:49.625255 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c" Nov 28 10:38:50 crc kubenswrapper[4838]: I1128 10:38:50.013183 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c"] Nov 28 10:38:50 crc kubenswrapper[4838]: I1128 10:38:50.189868 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c" event={"ID":"801ebab7-c9de-423e-910d-32e56be5cc7b","Type":"ContainerStarted","Data":"021385c0a83c3a1da03f0ad91b997c88c508103c493ca7bb15bde15b7581c5e5"} Nov 28 10:38:51 crc kubenswrapper[4838]: I1128 10:38:51.213591 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c" event={"ID":"801ebab7-c9de-423e-910d-32e56be5cc7b","Type":"ContainerStarted","Data":"65a725483b156c8886e19a1adc6aaf72cbe38beca984f3e8569d0eb2ca1db77e"} Nov 28 10:38:51 crc kubenswrapper[4838]: I1128 10:38:51.248613 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c" podStartSLOduration=1.649725369 podStartE2EDuration="2.248587172s" podCreationTimestamp="2025-11-28 10:38:49 +0000 UTC" firstStartedPulling="2025-11-28 10:38:50.013392677 +0000 UTC m=+2501.712366857" lastFinishedPulling="2025-11-28 10:38:50.61225448 +0000 UTC m=+2502.311228660" observedRunningTime="2025-11-28 10:38:51.235089665 +0000 UTC m=+2502.934063875" watchObservedRunningTime="2025-11-28 10:38:51.248587172 +0000 UTC m=+2502.947561372" Nov 28 10:38:55 crc kubenswrapper[4838]: I1128 10:38:55.561697 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7" Nov 28 10:38:55 crc kubenswrapper[4838]: E1128 10:38:55.562528 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:39:02 crc kubenswrapper[4838]: I1128 10:39:02.343912 4838 generic.go:334] "Generic (PLEG): container finished" podID="801ebab7-c9de-423e-910d-32e56be5cc7b" containerID="65a725483b156c8886e19a1adc6aaf72cbe38beca984f3e8569d0eb2ca1db77e" exitCode=0 Nov 28 10:39:02 crc kubenswrapper[4838]: I1128 10:39:02.344037 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c" event={"ID":"801ebab7-c9de-423e-910d-32e56be5cc7b","Type":"ContainerDied","Data":"65a725483b156c8886e19a1adc6aaf72cbe38beca984f3e8569d0eb2ca1db77e"} Nov 28 10:39:03 crc kubenswrapper[4838]: I1128 10:39:03.905972 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.033819 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/801ebab7-c9de-423e-910d-32e56be5cc7b-ceph\") pod \"801ebab7-c9de-423e-910d-32e56be5cc7b\" (UID: \"801ebab7-c9de-423e-910d-32e56be5cc7b\") " Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.033927 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jffbh\" (UniqueName: \"kubernetes.io/projected/801ebab7-c9de-423e-910d-32e56be5cc7b-kube-api-access-jffbh\") pod \"801ebab7-c9de-423e-910d-32e56be5cc7b\" (UID: \"801ebab7-c9de-423e-910d-32e56be5cc7b\") " Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.033986 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/801ebab7-c9de-423e-910d-32e56be5cc7b-ssh-key\") pod \"801ebab7-c9de-423e-910d-32e56be5cc7b\" (UID: \"801ebab7-c9de-423e-910d-32e56be5cc7b\") " Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.034037 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/801ebab7-c9de-423e-910d-32e56be5cc7b-inventory\") pod \"801ebab7-c9de-423e-910d-32e56be5cc7b\" (UID: \"801ebab7-c9de-423e-910d-32e56be5cc7b\") " Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.045921 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/801ebab7-c9de-423e-910d-32e56be5cc7b-ceph" (OuterVolumeSpecName: "ceph") pod "801ebab7-c9de-423e-910d-32e56be5cc7b" (UID: "801ebab7-c9de-423e-910d-32e56be5cc7b"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.068215 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/801ebab7-c9de-423e-910d-32e56be5cc7b-kube-api-access-jffbh" (OuterVolumeSpecName: "kube-api-access-jffbh") pod "801ebab7-c9de-423e-910d-32e56be5cc7b" (UID: "801ebab7-c9de-423e-910d-32e56be5cc7b"). InnerVolumeSpecName "kube-api-access-jffbh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.080740 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/801ebab7-c9de-423e-910d-32e56be5cc7b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "801ebab7-c9de-423e-910d-32e56be5cc7b" (UID: "801ebab7-c9de-423e-910d-32e56be5cc7b"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.081825 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/801ebab7-c9de-423e-910d-32e56be5cc7b-inventory" (OuterVolumeSpecName: "inventory") pod "801ebab7-c9de-423e-910d-32e56be5cc7b" (UID: "801ebab7-c9de-423e-910d-32e56be5cc7b"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.137776 4838 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/801ebab7-c9de-423e-910d-32e56be5cc7b-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.137813 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jffbh\" (UniqueName: \"kubernetes.io/projected/801ebab7-c9de-423e-910d-32e56be5cc7b-kube-api-access-jffbh\") on node \"crc\" DevicePath \"\"" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.137828 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/801ebab7-c9de-423e-910d-32e56be5cc7b-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.137838 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/801ebab7-c9de-423e-910d-32e56be5cc7b-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.370237 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c" event={"ID":"801ebab7-c9de-423e-910d-32e56be5cc7b","Type":"ContainerDied","Data":"021385c0a83c3a1da03f0ad91b997c88c508103c493ca7bb15bde15b7581c5e5"} Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.370307 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="021385c0a83c3a1da03f0ad91b997c88c508103c493ca7bb15bde15b7581c5e5" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.370312 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.473860 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46"] Nov 28 10:39:04 crc kubenswrapper[4838]: E1128 10:39:04.474633 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="801ebab7-c9de-423e-910d-32e56be5cc7b" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.474669 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="801ebab7-c9de-423e-910d-32e56be5cc7b" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.475139 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="801ebab7-c9de-423e-910d-32e56be5cc7b" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.476439 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.478528 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.478563 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.479429 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.479462 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.479992 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.480249 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.480527 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.483180 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.489259 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46"] Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.650130 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.650214 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.650287 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.650416 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: 
\"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.650485 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.650679 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.650906 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.651089 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.651262 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.651335 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.651395 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppkk7\" (UniqueName: \"kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-kube-api-access-ppkk7\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 
10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.651597 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.651666 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.753993 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.754554 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.754632 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.754752 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.754811 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.754932 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-libvirt-combined-ca-bundle\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.755020 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.755125 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.755240 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.755319 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.755370 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppkk7\" (UniqueName: \"kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-kube-api-access-ppkk7\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.755453 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.755507 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.762348 4838 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.762611 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.763027 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.763943 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.765444 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.766522 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.769911 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.771581 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 
crc kubenswrapper[4838]: I1128 10:39:04.772493 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.774964 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.775386 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.775573 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.779400 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppkk7\" (UniqueName: \"kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-kube-api-access-ppkk7\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-bbz46\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:04 crc kubenswrapper[4838]: I1128 10:39:04.815984 4838 util.go:30] "No sandbox for pod can be found. 
Nov 28 10:39:05 crc kubenswrapper[4838]: I1128 10:39:05.185099 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46"]
Nov 28 10:39:05 crc kubenswrapper[4838]: I1128 10:39:05.378070 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" event={"ID":"145f5aa8-896b-4b3c-846d-e896d932097d","Type":"ContainerStarted","Data":"3568bf265c4296f1ad8f01c173c830a0f7a6c6bccbb8b2ce2ff423060050ca19"}
Nov 28 10:39:06 crc kubenswrapper[4838]: I1128 10:39:06.392638 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" event={"ID":"145f5aa8-896b-4b3c-846d-e896d932097d","Type":"ContainerStarted","Data":"fbd1ed602e4120c2b4ca31a39713c560ce26f5103a82299845f772edf528f6ca"}
Nov 28 10:39:06 crc kubenswrapper[4838]: I1128 10:39:06.451964 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" podStartSLOduration=1.921813997 podStartE2EDuration="2.451932521s" podCreationTimestamp="2025-11-28 10:39:04 +0000 UTC" firstStartedPulling="2025-11-28 10:39:05.210542587 +0000 UTC m=+2516.909516757" lastFinishedPulling="2025-11-28 10:39:05.740661071 +0000 UTC m=+2517.439635281" observedRunningTime="2025-11-28 10:39:06.431076483 +0000 UTC m=+2518.130050723" watchObservedRunningTime="2025-11-28 10:39:06.451932521 +0000 UTC m=+2518.150906731"
Nov 28 10:39:07 crc kubenswrapper[4838]: I1128 10:39:07.562777 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7"
Nov 28 10:39:07 crc kubenswrapper[4838]: E1128 10:39:07.563575 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:39:20 crc kubenswrapper[4838]: I1128 10:39:20.563604 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7"
Nov 28 10:39:20 crc kubenswrapper[4838]: E1128 10:39:20.564992 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:39:32 crc kubenswrapper[4838]: I1128 10:39:32.563459 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7"
Nov 28 10:39:32 crc kubenswrapper[4838]: E1128 10:39:32.564274 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
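The "Observed pod startup duration" entry above carries verifiable arithmetic: podStartSLOduration equals podStartE2EDuration (watchObservedRunningTime minus podCreationTimestamp) minus the image-pull window, with the pull window taken from the monotonic m=+ offsets. A quick check of the logged values, under that reading of the fields:

```go
package main

import "fmt"

func main() {
	// Monotonic offsets (the "m=+..." values) from the entry above.
	firstStartedPulling := 2516.909516757
	lastFinishedPulling := 2517.439635281
	// watchObservedRunningTime (10:39:06.451932521) - podCreationTimestamp (10:39:04).
	e2e := 2.451932521

	pull := lastFinishedPulling - firstStartedPulling
	slo := e2e - pull
	fmt.Printf("pull window: %.9fs\n", pull) // 0.530118524s
	fmt.Printf("slo:         %.9fs\n", slo)  // 1.921813997s, matching podStartSLOduration
}
```

The later tracker entries follow the same relation: 3.739778612 - 0.725704193 = 3.014074419 for the ceph-client pod, and 2.861099806 - 0.712757319 = 2.148342487 for the ovn pod.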
pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:39:42 crc kubenswrapper[4838]: I1128 10:39:42.640720 4838 generic.go:334] "Generic (PLEG): container finished" podID="145f5aa8-896b-4b3c-846d-e896d932097d" containerID="fbd1ed602e4120c2b4ca31a39713c560ce26f5103a82299845f772edf528f6ca" exitCode=0 Nov 28 10:39:42 crc kubenswrapper[4838]: I1128 10:39:42.640832 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" event={"ID":"145f5aa8-896b-4b3c-846d-e896d932097d","Type":"ContainerDied","Data":"fbd1ed602e4120c2b4ca31a39713c560ce26f5103a82299845f772edf528f6ca"} Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.199689 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.285123 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-openstack-edpm-ipam-ovn-default-certs-0\") pod \"145f5aa8-896b-4b3c-846d-e896d932097d\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.285173 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-inventory\") pod \"145f5aa8-896b-4b3c-846d-e896d932097d\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.285236 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"145f5aa8-896b-4b3c-846d-e896d932097d\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.285266 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-nova-combined-ca-bundle\") pod \"145f5aa8-896b-4b3c-846d-e896d932097d\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.285312 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-neutron-metadata-combined-ca-bundle\") pod \"145f5aa8-896b-4b3c-846d-e896d932097d\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.285333 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-ovn-combined-ca-bundle\") pod \"145f5aa8-896b-4b3c-846d-e896d932097d\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.285373 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-ssh-key\") pod \"145f5aa8-896b-4b3c-846d-e896d932097d\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") " Nov 
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.285467 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ppkk7\" (UniqueName: \"kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-kube-api-access-ppkk7\") pod \"145f5aa8-896b-4b3c-846d-e896d932097d\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") "
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.285513 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-repo-setup-combined-ca-bundle\") pod \"145f5aa8-896b-4b3c-846d-e896d932097d\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") "
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.285571 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"145f5aa8-896b-4b3c-846d-e896d932097d\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") "
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.285603 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-bootstrap-combined-ca-bundle\") pod \"145f5aa8-896b-4b3c-846d-e896d932097d\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") "
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.285628 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-ceph\") pod \"145f5aa8-896b-4b3c-846d-e896d932097d\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") "
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.285649 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-libvirt-combined-ca-bundle\") pod \"145f5aa8-896b-4b3c-846d-e896d932097d\" (UID: \"145f5aa8-896b-4b3c-846d-e896d932097d\") "
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.291247 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "145f5aa8-896b-4b3c-846d-e896d932097d" (UID: "145f5aa8-896b-4b3c-846d-e896d932097d"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.291837 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "145f5aa8-896b-4b3c-846d-e896d932097d" (UID: "145f5aa8-896b-4b3c-846d-e896d932097d"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.294001 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "145f5aa8-896b-4b3c-846d-e896d932097d" (UID: "145f5aa8-896b-4b3c-846d-e896d932097d"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.294001 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-kube-api-access-ppkk7" (OuterVolumeSpecName: "kube-api-access-ppkk7") pod "145f5aa8-896b-4b3c-846d-e896d932097d" (UID: "145f5aa8-896b-4b3c-846d-e896d932097d"). InnerVolumeSpecName "kube-api-access-ppkk7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.294057 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "145f5aa8-896b-4b3c-846d-e896d932097d" (UID: "145f5aa8-896b-4b3c-846d-e896d932097d"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.294213 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "145f5aa8-896b-4b3c-846d-e896d932097d" (UID: "145f5aa8-896b-4b3c-846d-e896d932097d"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.294237 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-ceph" (OuterVolumeSpecName: "ceph") pod "145f5aa8-896b-4b3c-846d-e896d932097d" (UID: "145f5aa8-896b-4b3c-846d-e896d932097d"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.294655 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "145f5aa8-896b-4b3c-846d-e896d932097d" (UID: "145f5aa8-896b-4b3c-846d-e896d932097d"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.296590 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "145f5aa8-896b-4b3c-846d-e896d932097d" (UID: "145f5aa8-896b-4b3c-846d-e896d932097d"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.298381 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "145f5aa8-896b-4b3c-846d-e896d932097d" (UID: "145f5aa8-896b-4b3c-846d-e896d932097d"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.301532 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "145f5aa8-896b-4b3c-846d-e896d932097d" (UID: "145f5aa8-896b-4b3c-846d-e896d932097d"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.315672 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "145f5aa8-896b-4b3c-846d-e896d932097d" (UID: "145f5aa8-896b-4b3c-846d-e896d932097d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.316644 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-inventory" (OuterVolumeSpecName: "inventory") pod "145f5aa8-896b-4b3c-846d-e896d932097d" (UID: "145f5aa8-896b-4b3c-846d-e896d932097d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.388020 4838 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\""
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.388059 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-inventory\") on node \"crc\" DevicePath \"\""
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.388074 4838 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\""
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.388117 4838 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.388133 4838 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.388143 4838 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
\"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.388152 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.388160 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ppkk7\" (UniqueName: \"kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-kube-api-access-ppkk7\") on node \"crc\" DevicePath \"\"" Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.388170 4838 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.388180 4838 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/145f5aa8-896b-4b3c-846d-e896d932097d-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.388188 4838 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.388199 4838 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.388206 4838 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/145f5aa8-896b-4b3c-846d-e896d932097d-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.562172 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7" Nov 28 10:39:44 crc kubenswrapper[4838]: E1128 10:39:44.562688 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.673027 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-bbz46" event={"ID":"145f5aa8-896b-4b3c-846d-e896d932097d","Type":"ContainerDied","Data":"3568bf265c4296f1ad8f01c173c830a0f7a6c6bccbb8b2ce2ff423060050ca19"} Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.673099 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3568bf265c4296f1ad8f01c173c830a0f7a6c6bccbb8b2ce2ff423060050ca19" Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.673194 4838 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.881526 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp"]
Nov 28 10:39:44 crc kubenswrapper[4838]: E1128 10:39:44.882233 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="145f5aa8-896b-4b3c-846d-e896d932097d" containerName="install-certs-edpm-deployment-openstack-edpm-ipam"
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.882255 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="145f5aa8-896b-4b3c-846d-e896d932097d" containerName="install-certs-edpm-deployment-openstack-edpm-ipam"
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.882453 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="145f5aa8-896b-4b3c-846d-e896d932097d" containerName="install-certs-edpm-deployment-openstack-edpm-ipam"
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.883140 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp"
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.885567 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.885657 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files"
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.885664 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn"
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.886429 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.891858 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 28 10:39:44 crc kubenswrapper[4838]: I1128 10:39:44.896706 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp"]
Nov 28 10:39:45 crc kubenswrapper[4838]: I1128 10:39:45.001192 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp\" (UID: \"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp"
Nov 28 10:39:45 crc kubenswrapper[4838]: I1128 10:39:45.001246 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp\" (UID: \"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp"
Nov 28 10:39:45 crc kubenswrapper[4838]: I1128 10:39:45.001303 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp\" (UID: \"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp"
Nov 28 10:39:45 crc kubenswrapper[4838]: I1128 10:39:45.001372 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmd4n\" (UniqueName: \"kubernetes.io/projected/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-kube-api-access-gmd4n\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp\" (UID: \"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp"
Nov 28 10:39:45 crc kubenswrapper[4838]: I1128 10:39:45.102445 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmd4n\" (UniqueName: \"kubernetes.io/projected/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-kube-api-access-gmd4n\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp\" (UID: \"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp"
Nov 28 10:39:45 crc kubenswrapper[4838]: I1128 10:39:45.102517 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp\" (UID: \"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp"
Nov 28 10:39:45 crc kubenswrapper[4838]: I1128 10:39:45.102548 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp\" (UID: \"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp"
Nov 28 10:39:45 crc kubenswrapper[4838]: I1128 10:39:45.102601 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp\" (UID: \"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp"
Nov 28 10:39:45 crc kubenswrapper[4838]: I1128 10:39:45.108151 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp\" (UID: \"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp"
Nov 28 10:39:45 crc kubenswrapper[4838]: I1128 10:39:45.114630 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp\" (UID: \"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp"
Nov 28 10:39:45 crc kubenswrapper[4838]: I1128 10:39:45.121109 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp\" (UID: \"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp"
Nov 28 10:39:45 crc kubenswrapper[4838]: I1128 10:39:45.135099 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmd4n\" (UniqueName: \"kubernetes.io/projected/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-kube-api-access-gmd4n\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp\" (UID: \"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp"
Nov 28 10:39:45 crc kubenswrapper[4838]: I1128 10:39:45.222175 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp"
Nov 28 10:39:45 crc kubenswrapper[4838]: I1128 10:39:45.751526 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp"]
Nov 28 10:39:46 crc kubenswrapper[4838]: I1128 10:39:46.691242 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp" event={"ID":"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a","Type":"ContainerStarted","Data":"ec6e9eee96758993cc35e184197634bfec02925caf02702d44cfb7263a27a1e7"}
Nov 28 10:39:47 crc kubenswrapper[4838]: I1128 10:39:47.706122 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp" event={"ID":"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a","Type":"ContainerStarted","Data":"930b248e0ea4d84a470ea5834501f7d09e5ecda9e2c92c50f52771dabaf9d06d"}
Nov 28 10:39:47 crc kubenswrapper[4838]: I1128 10:39:47.739816 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp" podStartSLOduration=3.014074419 podStartE2EDuration="3.739778612s" podCreationTimestamp="2025-11-28 10:39:44 +0000 UTC" firstStartedPulling="2025-11-28 10:39:45.764347229 +0000 UTC m=+2557.463321439" lastFinishedPulling="2025-11-28 10:39:46.490051422 +0000 UTC m=+2558.189025632" observedRunningTime="2025-11-28 10:39:47.733156893 +0000 UTC m=+2559.432131103" watchObservedRunningTime="2025-11-28 10:39:47.739778612 +0000 UTC m=+2559.438752822"
Nov 28 10:39:52 crc kubenswrapper[4838]: I1128 10:39:52.763970 4838 generic.go:334] "Generic (PLEG): container finished" podID="6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a" containerID="930b248e0ea4d84a470ea5834501f7d09e5ecda9e2c92c50f52771dabaf9d06d" exitCode=0
Nov 28 10:39:52 crc kubenswrapper[4838]: I1128 10:39:52.764063 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp" event={"ID":"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a","Type":"ContainerDied","Data":"930b248e0ea4d84a470ea5834501f7d09e5ecda9e2c92c50f52771dabaf9d06d"}
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.296968 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp"
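The event={...} payloads in the PLEG entries above are JSON: a pod UID, an event type, and a container (or sandbox) ID. The full lifecycle of these one-shot deployment pods reads directly from them: ContainerStarted for the sandbox, ContainerStarted for the job container, "container finished ... exitCode=0", then ContainerDied. A small sketch decoding one such payload; the struct and its name are mine, chosen to mirror the JSON keys, not a kubelet type.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// PLEGEvent mirrors the event={...} payload printed in the entries
// above; the field names match the JSON keys, the type name is mine.
type PLEGEvent struct {
	ID   string // pod UID
	Type string // e.g. ContainerStarted, ContainerDied
	Data string // container or sandbox ID
}

func main() {
	raw := `{"ID":"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a","Type":"ContainerDied","Data":"930b248e0ea4d84a470ea5834501f7d09e5ecda9e2c92c50f52771dabaf9d06d"}`
	var ev PLEGEvent
	if err := json.Unmarshal([]byte(raw), &ev); err != nil {
		panic(err)
	}
	fmt.Printf("pod %s: %s %s\n", ev.ID, ev.Type, ev.Data[:12])
}
```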
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.436554 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gmd4n\" (UniqueName: \"kubernetes.io/projected/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-kube-api-access-gmd4n\") pod \"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a\" (UID: \"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a\") "
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.436600 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-ssh-key\") pod \"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a\" (UID: \"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a\") "
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.436703 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-ceph\") pod \"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a\" (UID: \"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a\") "
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.436745 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-inventory\") pod \"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a\" (UID: \"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a\") "
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.443950 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-ceph" (OuterVolumeSpecName: "ceph") pod "6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a" (UID: "6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.446264 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-kube-api-access-gmd4n" (OuterVolumeSpecName: "kube-api-access-gmd4n") pod "6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a" (UID: "6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a"). InnerVolumeSpecName "kube-api-access-gmd4n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.470039 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a" (UID: "6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.470841 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-inventory" (OuterVolumeSpecName: "inventory") pod "6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a" (UID: "6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.539121 4838 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-ceph\") on node \"crc\" DevicePath \"\""
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.539581 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-inventory\") on node \"crc\" DevicePath \"\""
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.539607 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gmd4n\" (UniqueName: \"kubernetes.io/projected/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-kube-api-access-gmd4n\") on node \"crc\" DevicePath \"\""
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.539625 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.791233 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp" event={"ID":"6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a","Type":"ContainerDied","Data":"ec6e9eee96758993cc35e184197634bfec02925caf02702d44cfb7263a27a1e7"}
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.791294 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec6e9eee96758993cc35e184197634bfec02925caf02702d44cfb7263a27a1e7"
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.791307 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp"
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.887470 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"]
Nov 28 10:39:54 crc kubenswrapper[4838]: E1128 10:39:54.888045 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam"
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.888119 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam"
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.888372 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam"
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.889054 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.892222 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files"
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.893257 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config"
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.893615 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn"
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.893702 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.893944 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.894078 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 28 10:39:54 crc kubenswrapper[4838]: I1128 10:39:54.907891 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"]
Nov 28 10:39:55 crc kubenswrapper[4838]: I1128 10:39:55.055977 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-49cxw\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"
Nov 28 10:39:55 crc kubenswrapper[4838]: I1128 10:39:55.056198 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-49cxw\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"
Nov 28 10:39:55 crc kubenswrapper[4838]: I1128 10:39:55.056251 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-49cxw\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"
Nov 28 10:39:55 crc kubenswrapper[4838]: I1128 10:39:55.056292 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6g57z\" (UniqueName: \"kubernetes.io/projected/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-kube-api-access-6g57z\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-49cxw\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"
Nov 28 10:39:55 crc kubenswrapper[4838]: I1128 10:39:55.057570 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-49cxw\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"
Nov 28 10:39:55 crc kubenswrapper[4838]: I1128 10:39:55.058592 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-49cxw\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"
Nov 28 10:39:55 crc kubenswrapper[4838]: I1128 10:39:55.160909 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-49cxw\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"
Nov 28 10:39:55 crc kubenswrapper[4838]: I1128 10:39:55.161180 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-49cxw\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"
Nov 28 10:39:55 crc kubenswrapper[4838]: I1128 10:39:55.161284 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-49cxw\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"
Nov 28 10:39:55 crc kubenswrapper[4838]: I1128 10:39:55.161330 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-49cxw\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"
Nov 28 10:39:55 crc kubenswrapper[4838]: I1128 10:39:55.161374 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6g57z\" (UniqueName: \"kubernetes.io/projected/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-kube-api-access-6g57z\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-49cxw\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"
Nov 28 10:39:55 crc kubenswrapper[4838]: I1128 10:39:55.161443 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-49cxw\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"
Nov 28 10:39:55 crc kubenswrapper[4838]: I1128 10:39:55.163834 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-49cxw\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"
Nov 28 10:39:55 crc kubenswrapper[4838]: I1128 10:39:55.167688 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-49cxw\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"
Nov 28 10:39:55 crc kubenswrapper[4838]: I1128 10:39:55.169426 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-49cxw\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"
Nov 28 10:39:55 crc kubenswrapper[4838]: I1128 10:39:55.169619 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-49cxw\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"
Nov 28 10:39:55 crc kubenswrapper[4838]: I1128 10:39:55.171368 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-49cxw\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"
Nov 28 10:39:55 crc kubenswrapper[4838]: I1128 10:39:55.200166 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6g57z\" (UniqueName: \"kubernetes.io/projected/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-kube-api-access-6g57z\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-49cxw\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"
Nov 28 10:39:55 crc kubenswrapper[4838]: I1128 10:39:55.211913 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"
Nov 28 10:39:55 crc kubenswrapper[4838]: I1128 10:39:55.619820 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw"]
Nov 28 10:39:55 crc kubenswrapper[4838]: I1128 10:39:55.803939 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw" event={"ID":"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae","Type":"ContainerStarted","Data":"50b406c01af22eb8a258643e4009e13d3b21fec3af9c01db0c47383cc86d3451"}
Nov 28 10:39:56 crc kubenswrapper[4838]: I1128 10:39:56.826800 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw" event={"ID":"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae","Type":"ContainerStarted","Data":"66d34c85084ee7bc25ef7dd8a0043ae77516fcdce90783b17d862593c363eb56"}
Nov 28 10:39:56 crc kubenswrapper[4838]: I1128 10:39:56.861125 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw" podStartSLOduration=2.148342487 podStartE2EDuration="2.861099806s" podCreationTimestamp="2025-11-28 10:39:54 +0000 UTC" firstStartedPulling="2025-11-28 10:39:55.630457955 +0000 UTC m=+2567.329432135" lastFinishedPulling="2025-11-28 10:39:56.343215284 +0000 UTC m=+2568.042189454" observedRunningTime="2025-11-28 10:39:56.850273532 +0000 UTC m=+2568.549247732" watchObservedRunningTime="2025-11-28 10:39:56.861099806 +0000 UTC m=+2568.560074006"
Nov 28 10:39:57 crc kubenswrapper[4838]: I1128 10:39:57.562301 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7"
Nov 28 10:39:57 crc kubenswrapper[4838]: E1128 10:39:57.563061 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:40:12 crc kubenswrapper[4838]: I1128 10:40:12.562283 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7"
Nov 28 10:40:12 crc kubenswrapper[4838]: E1128 10:40:12.563496 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:40:26 crc kubenswrapper[4838]: I1128 10:40:26.563779 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7"
Nov 28 10:40:26 crc kubenswrapper[4838]: E1128 10:40:26.566266 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:40:41 crc kubenswrapper[4838]: I1128 10:40:41.563147 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7"
Nov 28 10:40:41 crc kubenswrapper[4838]: E1128 10:40:41.563970 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:40:55 crc kubenswrapper[4838]: I1128 10:40:55.563171 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7"
Nov 28 10:40:55 crc kubenswrapper[4838]: E1128 10:40:55.568390 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:41:07 crc kubenswrapper[4838]: I1128 10:41:07.562455 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7"
Nov 28 10:41:07 crc kubenswrapper[4838]: E1128 10:41:07.563893 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:41:17 crc kubenswrapper[4838]: I1128 10:41:17.979555 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-92tqw"]
Nov 28 10:41:17 crc kubenswrapper[4838]: I1128 10:41:17.983544 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-92tqw"
Nov 28 10:41:17 crc kubenswrapper[4838]: I1128 10:41:17.990358 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/444518cd-8fef-438c-953c-7534997b068d-catalog-content\") pod \"redhat-operators-92tqw\" (UID: \"444518cd-8fef-438c-953c-7534997b068d\") " pod="openshift-marketplace/redhat-operators-92tqw"
Nov 28 10:41:17 crc kubenswrapper[4838]: I1128 10:41:17.990440 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/444518cd-8fef-438c-953c-7534997b068d-utilities\") pod \"redhat-operators-92tqw\" (UID: \"444518cd-8fef-438c-953c-7534997b068d\") " pod="openshift-marketplace/redhat-operators-92tqw"
Nov 28 10:41:17 crc kubenswrapper[4838]: I1128 10:41:17.990759 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bs99t\" (UniqueName: \"kubernetes.io/projected/444518cd-8fef-438c-953c-7534997b068d-kube-api-access-bs99t\") pod \"redhat-operators-92tqw\" (UID: \"444518cd-8fef-438c-953c-7534997b068d\") " pod="openshift-marketplace/redhat-operators-92tqw"
Nov 28 10:41:17 crc kubenswrapper[4838]: I1128 10:41:17.996450 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-92tqw"]
Nov 28 10:41:18 crc kubenswrapper[4838]: I1128 10:41:18.093134 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/444518cd-8fef-438c-953c-7534997b068d-catalog-content\") pod \"redhat-operators-92tqw\" (UID: \"444518cd-8fef-438c-953c-7534997b068d\") " pod="openshift-marketplace/redhat-operators-92tqw"
Nov 28 10:41:18 crc kubenswrapper[4838]: I1128 10:41:18.093523 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/444518cd-8fef-438c-953c-7534997b068d-utilities\") pod \"redhat-operators-92tqw\" (UID: \"444518cd-8fef-438c-953c-7534997b068d\") " pod="openshift-marketplace/redhat-operators-92tqw"
Nov 28 10:41:18 crc kubenswrapper[4838]: I1128 10:41:18.093611 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bs99t\" (UniqueName: \"kubernetes.io/projected/444518cd-8fef-438c-953c-7534997b068d-kube-api-access-bs99t\") pod \"redhat-operators-92tqw\" (UID: \"444518cd-8fef-438c-953c-7534997b068d\") " pod="openshift-marketplace/redhat-operators-92tqw"
Nov 28 10:41:18 crc kubenswrapper[4838]: I1128 10:41:18.093613 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/444518cd-8fef-438c-953c-7534997b068d-catalog-content\") pod \"redhat-operators-92tqw\" (UID: \"444518cd-8fef-438c-953c-7534997b068d\") " pod="openshift-marketplace/redhat-operators-92tqw"
Nov 28 10:41:18 crc kubenswrapper[4838]: I1128 10:41:18.094286 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/444518cd-8fef-438c-953c-7534997b068d-utilities\") pod \"redhat-operators-92tqw\" (UID: \"444518cd-8fef-438c-953c-7534997b068d\") " pod="openshift-marketplace/redhat-operators-92tqw"
Nov 28 10:41:18 crc kubenswrapper[4838]: I1128 10:41:18.122686 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bs99t\" (UniqueName: \"kubernetes.io/projected/444518cd-8fef-438c-953c-7534997b068d-kube-api-access-bs99t\") pod \"redhat-operators-92tqw\" (UID: \"444518cd-8fef-438c-953c-7534997b068d\") " pod="openshift-marketplace/redhat-operators-92tqw"
Nov 28 10:41:18 crc kubenswrapper[4838]: I1128 10:41:18.325574 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-92tqw"
Nov 28 10:41:18 crc kubenswrapper[4838]: I1128 10:41:18.394287 4838 scope.go:117] "RemoveContainer" containerID="dcb042ca2e6cfafba53a2319df73bd0f0b7b74751049b001d5dc3c04e588b5a8"
Nov 28 10:41:18 crc kubenswrapper[4838]: I1128 10:41:18.471339 4838 scope.go:117] "RemoveContainer" containerID="ef9394d1e1d4b0362c0d6fbefaa5e3ab39d55623d383bbfaded928865456e91c"
Nov 28 10:41:18 crc kubenswrapper[4838]: I1128 10:41:18.517945 4838 scope.go:117] "RemoveContainer" containerID="fb54351693018e84580beeeedba572f5d9cc885be62159dfd4a2bc021dd24fe1"
Nov 28 10:41:18 crc kubenswrapper[4838]: I1128 10:41:18.782135 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-92tqw"]
Nov 28 10:41:18 crc kubenswrapper[4838]: W1128 10:41:18.785394 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod444518cd_8fef_438c_953c_7534997b068d.slice/crio-0f62e90eb3880a6f557fe6ffd5f4385c58b0e5351354345c5ca6e122bc52aff6 WatchSource:0}: Error finding container 0f62e90eb3880a6f557fe6ffd5f4385c58b0e5351354345c5ca6e122bc52aff6: Status 404 returned error can't find the container with id 0f62e90eb3880a6f557fe6ffd5f4385c58b0e5351354345c5ca6e122bc52aff6
Nov 28 10:41:19 crc kubenswrapper[4838]: I1128 10:41:19.562500 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7"
Nov 28 10:41:19 crc kubenswrapper[4838]: E1128 10:41:19.563349 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:41:19 crc kubenswrapper[4838]: I1128 10:41:19.760344 4838 generic.go:334] "Generic (PLEG): container finished" podID="444518cd-8fef-438c-953c-7534997b068d" containerID="15e2304722c47e26521ce3298d17e50a2440961b0dda5f7d9b8305b43fdd806c" exitCode=0
Nov 28 10:41:19 crc kubenswrapper[4838]: I1128 10:41:19.760461 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-92tqw" event={"ID":"444518cd-8fef-438c-953c-7534997b068d","Type":"ContainerDied","Data":"15e2304722c47e26521ce3298d17e50a2440961b0dda5f7d9b8305b43fdd806c"}
Nov 28 10:41:19 crc kubenswrapper[4838]: I1128 10:41:19.760501 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-92tqw" event={"ID":"444518cd-8fef-438c-953c-7534997b068d","Type":"ContainerStarted","Data":"0f62e90eb3880a6f557fe6ffd5f4385c58b0e5351354345c5ca6e122bc52aff6"}
Nov 28 10:41:19 crc kubenswrapper[4838]: I1128 10:41:19.763842 4838 generic.go:334] "Generic (PLEG): container finished" podID="e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae" containerID="66d34c85084ee7bc25ef7dd8a0043ae77516fcdce90783b17d862593c363eb56" exitCode=0
kubenswrapper[4838]: I1128 10:41:19.763902 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw" event={"ID":"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae","Type":"ContainerDied","Data":"66d34c85084ee7bc25ef7dd8a0043ae77516fcdce90783b17d862593c363eb56"} Nov 28 10:41:19 crc kubenswrapper[4838]: I1128 10:41:19.764153 4838 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.321623 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.369653 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-inventory\") pod \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.369701 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ovncontroller-config-0\") pod \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.369749 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ceph\") pod \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.369827 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ssh-key\") pod \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.369853 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ovn-combined-ca-bundle\") pod \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.369901 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g57z\" (UniqueName: \"kubernetes.io/projected/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-kube-api-access-6g57z\") pod \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\" (UID: \"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae\") " Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.388703 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-kube-api-access-6g57z" (OuterVolumeSpecName: "kube-api-access-6g57z") pod "e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae" (UID: "e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae"). InnerVolumeSpecName "kube-api-access-6g57z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.389091 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ceph" (OuterVolumeSpecName: "ceph") pod "e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae" (UID: "e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.389135 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae" (UID: "e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.409375 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-inventory" (OuterVolumeSpecName: "inventory") pod "e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae" (UID: "e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.414119 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae" (UID: "e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.419938 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae" (UID: "e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.470996 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.471023 4838 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.471082 4838 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.471092 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.471101 4838 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.471109 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g57z\" (UniqueName: \"kubernetes.io/projected/e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae-kube-api-access-6g57z\") on node \"crc\" DevicePath \"\"" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.789048 4838 generic.go:334] "Generic (PLEG): container finished" podID="444518cd-8fef-438c-953c-7534997b068d" containerID="0606598a19742f2887baa8bbb20668a8f574db08510fc1d9056bc0083ebd73e4" exitCode=0 Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.789117 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-92tqw" event={"ID":"444518cd-8fef-438c-953c-7534997b068d","Type":"ContainerDied","Data":"0606598a19742f2887baa8bbb20668a8f574db08510fc1d9056bc0083ebd73e4"} Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.793453 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw" event={"ID":"e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae","Type":"ContainerDied","Data":"50b406c01af22eb8a258643e4009e13d3b21fec3af9c01db0c47383cc86d3451"} Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.793503 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="50b406c01af22eb8a258643e4009e13d3b21fec3af9c01db0c47383cc86d3451" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.793571 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-49cxw" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.917324 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj"] Nov 28 10:41:21 crc kubenswrapper[4838]: E1128 10:41:21.917776 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.917799 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.918012 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.918695 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.926755 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj"] Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.928267 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.928788 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.929592 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.930775 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.931923 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.932057 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.932261 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.980538 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.980693 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.980847 
4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.981015 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.981119 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.981339 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:21 crc kubenswrapper[4838]: I1128 10:41:21.981544 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kphwj\" (UniqueName: \"kubernetes.io/projected/777a7bbd-ba32-4b20-a263-de82be50d3b1-kube-api-access-kphwj\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:22 crc kubenswrapper[4838]: I1128 10:41:22.084096 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kphwj\" (UniqueName: \"kubernetes.io/projected/777a7bbd-ba32-4b20-a263-de82be50d3b1-kube-api-access-kphwj\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:22 crc kubenswrapper[4838]: I1128 10:41:22.084237 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:22 crc kubenswrapper[4838]: I1128 10:41:22.084306 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-ceph\") pod 
\"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:22 crc kubenswrapper[4838]: I1128 10:41:22.084373 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:22 crc kubenswrapper[4838]: I1128 10:41:22.084572 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:22 crc kubenswrapper[4838]: I1128 10:41:22.085396 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:22 crc kubenswrapper[4838]: I1128 10:41:22.085513 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:22 crc kubenswrapper[4838]: I1128 10:41:22.089784 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:22 crc kubenswrapper[4838]: I1128 10:41:22.089986 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:22 crc kubenswrapper[4838]: I1128 10:41:22.090168 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:22 crc kubenswrapper[4838]: I1128 10:41:22.091661 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ceph\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:22 crc kubenswrapper[4838]: I1128 10:41:22.093188 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:22 crc kubenswrapper[4838]: I1128 10:41:22.093335 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:22 crc kubenswrapper[4838]: I1128 10:41:22.122378 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kphwj\" (UniqueName: \"kubernetes.io/projected/777a7bbd-ba32-4b20-a263-de82be50d3b1-kube-api-access-kphwj\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:22 crc kubenswrapper[4838]: I1128 10:41:22.237821 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:41:22 crc kubenswrapper[4838]: I1128 10:41:22.591549 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj"] Nov 28 10:41:22 crc kubenswrapper[4838]: W1128 10:41:22.596538 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod777a7bbd_ba32_4b20_a263_de82be50d3b1.slice/crio-e646679dfe0714e3a520c6ea24854930f77718b610a8bef67ab609a98b341b18 WatchSource:0}: Error finding container e646679dfe0714e3a520c6ea24854930f77718b610a8bef67ab609a98b341b18: Status 404 returned error can't find the container with id e646679dfe0714e3a520c6ea24854930f77718b610a8bef67ab609a98b341b18 Nov 28 10:41:22 crc kubenswrapper[4838]: I1128 10:41:22.807325 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" event={"ID":"777a7bbd-ba32-4b20-a263-de82be50d3b1","Type":"ContainerStarted","Data":"e646679dfe0714e3a520c6ea24854930f77718b610a8bef67ab609a98b341b18"} Nov 28 10:41:22 crc kubenswrapper[4838]: I1128 10:41:22.810541 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-92tqw" event={"ID":"444518cd-8fef-438c-953c-7534997b068d","Type":"ContainerStarted","Data":"74a07a8124f2a14aa7219f2dd5e1d072f8f4b994ef71356c3bd8948f96b3d3c3"} Nov 28 10:41:22 crc kubenswrapper[4838]: I1128 10:41:22.843247 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-92tqw" podStartSLOduration=3.32750223 podStartE2EDuration="5.843207574s" podCreationTimestamp="2025-11-28 10:41:17 +0000 UTC" firstStartedPulling="2025-11-28 10:41:19.76369388 +0000 UTC m=+2651.462668080" lastFinishedPulling="2025-11-28 10:41:22.279399264 +0000 UTC m=+2653.978373424" observedRunningTime="2025-11-28 10:41:22.833459559 +0000 UTC m=+2654.532433729" watchObservedRunningTime="2025-11-28 10:41:22.843207574 +0000 UTC m=+2654.542181784" Nov 28 10:41:23 crc kubenswrapper[4838]: I1128 10:41:23.825695 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" event={"ID":"777a7bbd-ba32-4b20-a263-de82be50d3b1","Type":"ContainerStarted","Data":"48ac46e6eeee2e7f92a98d85132dbb7c5c3e983e02c8535b7be302fda58cdec7"} Nov 28 10:41:23 crc kubenswrapper[4838]: I1128 10:41:23.855709 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" podStartSLOduration=2.127159954 podStartE2EDuration="2.855680953s" podCreationTimestamp="2025-11-28 10:41:21 +0000 UTC" firstStartedPulling="2025-11-28 10:41:22.598898771 +0000 UTC m=+2654.297872941" lastFinishedPulling="2025-11-28 10:41:23.32741977 +0000 UTC m=+2655.026393940" observedRunningTime="2025-11-28 10:41:23.848633992 +0000 UTC m=+2655.547608232" watchObservedRunningTime="2025-11-28 10:41:23.855680953 +0000 UTC m=+2655.554655153" Nov 28 10:41:28 crc kubenswrapper[4838]: I1128 10:41:28.326074 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-92tqw" Nov 28 10:41:28 crc kubenswrapper[4838]: I1128 10:41:28.326816 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-92tqw" Nov 28 10:41:29 crc kubenswrapper[4838]: I1128 
Nov 28 10:41:29 crc kubenswrapper[4838]: I1128 10:41:29.418589 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-92tqw" podUID="444518cd-8fef-438c-953c-7534997b068d" containerName="registry-server" probeResult="failure" output=<
Nov 28 10:41:29 crc kubenswrapper[4838]: timeout: failed to connect service ":50051" within 1s
Nov 28 10:41:29 crc kubenswrapper[4838]: >
Nov 28 10:41:30 crc kubenswrapper[4838]: I1128 10:41:30.563375 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7"
Nov 28 10:41:30 crc kubenswrapper[4838]: E1128 10:41:30.564094 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:41:38 crc kubenswrapper[4838]: I1128 10:41:38.387529 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-92tqw"
Nov 28 10:41:38 crc kubenswrapper[4838]: I1128 10:41:38.471415 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-92tqw"
Nov 28 10:41:38 crc kubenswrapper[4838]: I1128 10:41:38.637008 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-92tqw"]
Nov 28 10:41:40 crc kubenswrapper[4838]: I1128 10:41:39.999977 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-92tqw" podUID="444518cd-8fef-438c-953c-7534997b068d" containerName="registry-server" containerID="cri-o://74a07a8124f2a14aa7219f2dd5e1d072f8f4b994ef71356c3bd8948f96b3d3c3" gracePeriod=2
Nov 28 10:41:40 crc kubenswrapper[4838]: I1128 10:41:40.562535 4838 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/redhat-operators-92tqw" Nov 28 10:41:40 crc kubenswrapper[4838]: I1128 10:41:40.627043 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/444518cd-8fef-438c-953c-7534997b068d-catalog-content\") pod \"444518cd-8fef-438c-953c-7534997b068d\" (UID: \"444518cd-8fef-438c-953c-7534997b068d\") " Nov 28 10:41:40 crc kubenswrapper[4838]: I1128 10:41:40.627214 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bs99t\" (UniqueName: \"kubernetes.io/projected/444518cd-8fef-438c-953c-7534997b068d-kube-api-access-bs99t\") pod \"444518cd-8fef-438c-953c-7534997b068d\" (UID: \"444518cd-8fef-438c-953c-7534997b068d\") " Nov 28 10:41:40 crc kubenswrapper[4838]: I1128 10:41:40.627318 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/444518cd-8fef-438c-953c-7534997b068d-utilities\") pod \"444518cd-8fef-438c-953c-7534997b068d\" (UID: \"444518cd-8fef-438c-953c-7534997b068d\") " Nov 28 10:41:40 crc kubenswrapper[4838]: I1128 10:41:40.628894 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/444518cd-8fef-438c-953c-7534997b068d-utilities" (OuterVolumeSpecName: "utilities") pod "444518cd-8fef-438c-953c-7534997b068d" (UID: "444518cd-8fef-438c-953c-7534997b068d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:41:40 crc kubenswrapper[4838]: I1128 10:41:40.637212 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/444518cd-8fef-438c-953c-7534997b068d-kube-api-access-bs99t" (OuterVolumeSpecName: "kube-api-access-bs99t") pod "444518cd-8fef-438c-953c-7534997b068d" (UID: "444518cd-8fef-438c-953c-7534997b068d"). InnerVolumeSpecName "kube-api-access-bs99t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:41:40 crc kubenswrapper[4838]: I1128 10:41:40.729842 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bs99t\" (UniqueName: \"kubernetes.io/projected/444518cd-8fef-438c-953c-7534997b068d-kube-api-access-bs99t\") on node \"crc\" DevicePath \"\"" Nov 28 10:41:40 crc kubenswrapper[4838]: I1128 10:41:40.729898 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/444518cd-8fef-438c-953c-7534997b068d-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:41:40 crc kubenswrapper[4838]: I1128 10:41:40.769659 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/444518cd-8fef-438c-953c-7534997b068d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "444518cd-8fef-438c-953c-7534997b068d" (UID: "444518cd-8fef-438c-953c-7534997b068d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:41:40 crc kubenswrapper[4838]: I1128 10:41:40.832148 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/444518cd-8fef-438c-953c-7534997b068d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:41:41 crc kubenswrapper[4838]: I1128 10:41:41.015879 4838 generic.go:334] "Generic (PLEG): container finished" podID="444518cd-8fef-438c-953c-7534997b068d" containerID="74a07a8124f2a14aa7219f2dd5e1d072f8f4b994ef71356c3bd8948f96b3d3c3" exitCode=0 Nov 28 10:41:41 crc kubenswrapper[4838]: I1128 10:41:41.015940 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-92tqw" event={"ID":"444518cd-8fef-438c-953c-7534997b068d","Type":"ContainerDied","Data":"74a07a8124f2a14aa7219f2dd5e1d072f8f4b994ef71356c3bd8948f96b3d3c3"} Nov 28 10:41:41 crc kubenswrapper[4838]: I1128 10:41:41.015981 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-92tqw" event={"ID":"444518cd-8fef-438c-953c-7534997b068d","Type":"ContainerDied","Data":"0f62e90eb3880a6f557fe6ffd5f4385c58b0e5351354345c5ca6e122bc52aff6"} Nov 28 10:41:41 crc kubenswrapper[4838]: I1128 10:41:41.016012 4838 scope.go:117] "RemoveContainer" containerID="74a07a8124f2a14aa7219f2dd5e1d072f8f4b994ef71356c3bd8948f96b3d3c3" Nov 28 10:41:41 crc kubenswrapper[4838]: I1128 10:41:41.016197 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-92tqw" Nov 28 10:41:41 crc kubenswrapper[4838]: I1128 10:41:41.048194 4838 scope.go:117] "RemoveContainer" containerID="0606598a19742f2887baa8bbb20668a8f574db08510fc1d9056bc0083ebd73e4" Nov 28 10:41:41 crc kubenswrapper[4838]: I1128 10:41:41.073338 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-92tqw"] Nov 28 10:41:41 crc kubenswrapper[4838]: I1128 10:41:41.088832 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-92tqw"] Nov 28 10:41:41 crc kubenswrapper[4838]: I1128 10:41:41.091532 4838 scope.go:117] "RemoveContainer" containerID="15e2304722c47e26521ce3298d17e50a2440961b0dda5f7d9b8305b43fdd806c" Nov 28 10:41:41 crc kubenswrapper[4838]: I1128 10:41:41.134186 4838 scope.go:117] "RemoveContainer" containerID="74a07a8124f2a14aa7219f2dd5e1d072f8f4b994ef71356c3bd8948f96b3d3c3" Nov 28 10:41:41 crc kubenswrapper[4838]: E1128 10:41:41.135160 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74a07a8124f2a14aa7219f2dd5e1d072f8f4b994ef71356c3bd8948f96b3d3c3\": container with ID starting with 74a07a8124f2a14aa7219f2dd5e1d072f8f4b994ef71356c3bd8948f96b3d3c3 not found: ID does not exist" containerID="74a07a8124f2a14aa7219f2dd5e1d072f8f4b994ef71356c3bd8948f96b3d3c3" Nov 28 10:41:41 crc kubenswrapper[4838]: I1128 10:41:41.135203 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74a07a8124f2a14aa7219f2dd5e1d072f8f4b994ef71356c3bd8948f96b3d3c3"} err="failed to get container status \"74a07a8124f2a14aa7219f2dd5e1d072f8f4b994ef71356c3bd8948f96b3d3c3\": rpc error: code = NotFound desc = could not find container \"74a07a8124f2a14aa7219f2dd5e1d072f8f4b994ef71356c3bd8948f96b3d3c3\": container with ID starting with 74a07a8124f2a14aa7219f2dd5e1d072f8f4b994ef71356c3bd8948f96b3d3c3 not found: ID does not exist" Nov 28 10:41:41 crc 
kubenswrapper[4838]: I1128 10:41:41.135230 4838 scope.go:117] "RemoveContainer" containerID="0606598a19742f2887baa8bbb20668a8f574db08510fc1d9056bc0083ebd73e4" Nov 28 10:41:41 crc kubenswrapper[4838]: E1128 10:41:41.135731 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0606598a19742f2887baa8bbb20668a8f574db08510fc1d9056bc0083ebd73e4\": container with ID starting with 0606598a19742f2887baa8bbb20668a8f574db08510fc1d9056bc0083ebd73e4 not found: ID does not exist" containerID="0606598a19742f2887baa8bbb20668a8f574db08510fc1d9056bc0083ebd73e4" Nov 28 10:41:41 crc kubenswrapper[4838]: I1128 10:41:41.135757 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0606598a19742f2887baa8bbb20668a8f574db08510fc1d9056bc0083ebd73e4"} err="failed to get container status \"0606598a19742f2887baa8bbb20668a8f574db08510fc1d9056bc0083ebd73e4\": rpc error: code = NotFound desc = could not find container \"0606598a19742f2887baa8bbb20668a8f574db08510fc1d9056bc0083ebd73e4\": container with ID starting with 0606598a19742f2887baa8bbb20668a8f574db08510fc1d9056bc0083ebd73e4 not found: ID does not exist" Nov 28 10:41:41 crc kubenswrapper[4838]: I1128 10:41:41.135771 4838 scope.go:117] "RemoveContainer" containerID="15e2304722c47e26521ce3298d17e50a2440961b0dda5f7d9b8305b43fdd806c" Nov 28 10:41:41 crc kubenswrapper[4838]: E1128 10:41:41.136087 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"15e2304722c47e26521ce3298d17e50a2440961b0dda5f7d9b8305b43fdd806c\": container with ID starting with 15e2304722c47e26521ce3298d17e50a2440961b0dda5f7d9b8305b43fdd806c not found: ID does not exist" containerID="15e2304722c47e26521ce3298d17e50a2440961b0dda5f7d9b8305b43fdd806c" Nov 28 10:41:41 crc kubenswrapper[4838]: I1128 10:41:41.136115 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15e2304722c47e26521ce3298d17e50a2440961b0dda5f7d9b8305b43fdd806c"} err="failed to get container status \"15e2304722c47e26521ce3298d17e50a2440961b0dda5f7d9b8305b43fdd806c\": rpc error: code = NotFound desc = could not find container \"15e2304722c47e26521ce3298d17e50a2440961b0dda5f7d9b8305b43fdd806c\": container with ID starting with 15e2304722c47e26521ce3298d17e50a2440961b0dda5f7d9b8305b43fdd806c not found: ID does not exist" Nov 28 10:41:42 crc kubenswrapper[4838]: I1128 10:41:42.562630 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7" Nov 28 10:41:42 crc kubenswrapper[4838]: E1128 10:41:42.566240 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:41:42 crc kubenswrapper[4838]: I1128 10:41:42.577185 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="444518cd-8fef-438c-953c-7534997b068d" path="/var/lib/kubelet/pods/444518cd-8fef-438c-953c-7534997b068d/volumes" Nov 28 10:41:54 crc kubenswrapper[4838]: I1128 10:41:54.563104 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7" 
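[Editor's note] The machine-config-daemon entries repeating through this stretch (10:41:19 onward) all carry the same "back-off 5m0s" error: each sync re-queues the pod, but the kubelet refuses to restart the container until the crash-loop delay expires, which here happens at 10:43:07-08 when the container is finally recreated. Kubernetes documents the CrashLoopBackOff delay as exponential, starting at 10s, doubling per restart, and capped at five minutes; a sketch of that documented schedule (an illustration of the policy, not kubelet source):

package main

import (
	"fmt"
	"time"
)

// crashLoopDelay returns the documented CrashLoopBackOff delay before the
// n-th consecutive restart: 10s, doubling each time, capped at 5m0s.
func crashLoopDelay(restarts int) time.Duration {
	delay := 10 * time.Second
	for i := 0; i < restarts; i++ {
		delay *= 2
		if delay >= 5*time.Minute {
			return 5 * time.Minute
		}
	}
	return delay
}

func main() {
	for n := 0; n <= 6; n++ {
		fmt.Printf("restart %d -> wait %s\n", n, crashLoopDelay(n))
	}
	// restart 5 and later print 5m0s, matching the "back-off 5m0s"
	// reported for machine-config-daemon-5dxdd above and below.
}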
Nov 28 10:41:54 crc kubenswrapper[4838]: E1128 10:41:54.564258 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:42:08 crc kubenswrapper[4838]: I1128 10:42:08.563532 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7"
Nov 28 10:42:08 crc kubenswrapper[4838]: E1128 10:42:08.568459 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:42:23 crc kubenswrapper[4838]: I1128 10:42:23.562583 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7"
Nov 28 10:42:23 crc kubenswrapper[4838]: E1128 10:42:23.563858 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:42:34 crc kubenswrapper[4838]: I1128 10:42:34.596160 4838 generic.go:334] "Generic (PLEG): container finished" podID="777a7bbd-ba32-4b20-a263-de82be50d3b1" containerID="48ac46e6eeee2e7f92a98d85132dbb7c5c3e983e02c8535b7be302fda58cdec7" exitCode=0
Nov 28 10:42:34 crc kubenswrapper[4838]: I1128 10:42:34.596268 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" event={"ID":"777a7bbd-ba32-4b20-a263-de82be50d3b1","Type":"ContainerDied","Data":"48ac46e6eeee2e7f92a98d85132dbb7c5c3e983e02c8535b7be302fda58cdec7"}
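[Editor's note] Every entry in this capture shares the same shape: a journald prefix (`Nov 28 10:42:08 crc kubenswrapper[4838]:`) followed by a klog header (severity letter, MMDD date, wall-clock time, PID, `file.go:line]`) and the message. A short sketch of splitting that header out of a line, assuming exactly this layout (the regex is illustrative, not a stable kubelet interface):

package main

import (
	"fmt"
	"regexp"
)

// klogHeader matches the klog prefix inside a journald line:
// severity (I/W/E/F), MMDD, HH:MM:SS.ffffff, PID, and file:line.
var klogHeader = regexp.MustCompile(
	`kubenswrapper\[\d+\]: ([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d+) +\d+ (\w+\.go:\d+)\] (.*)`)

func main() {
	line := `Nov 28 10:42:08 crc kubenswrapper[4838]: I1128 10:42:08.563532 4838 scope.go:117] "RemoveContainer" containerID="eb7c..."`
	m := klogHeader.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("no klog header found")
		return
	}
	fmt.Printf("severity=%s date=%s time=%s source=%s msg=%s\n",
		m[1], m[2], m[3], m[4], m[5])
}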
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.255020 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-neutron-ovn-metadata-agent-neutron-config-0\") pod \"777a7bbd-ba32-4b20-a263-de82be50d3b1\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.255840 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kphwj\" (UniqueName: \"kubernetes.io/projected/777a7bbd-ba32-4b20-a263-de82be50d3b1-kube-api-access-kphwj\") pod \"777a7bbd-ba32-4b20-a263-de82be50d3b1\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.255958 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-ssh-key\") pod \"777a7bbd-ba32-4b20-a263-de82be50d3b1\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.256093 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-neutron-metadata-combined-ca-bundle\") pod \"777a7bbd-ba32-4b20-a263-de82be50d3b1\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.256470 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-nova-metadata-neutron-config-0\") pod \"777a7bbd-ba32-4b20-a263-de82be50d3b1\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.256648 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-ceph\") pod \"777a7bbd-ba32-4b20-a263-de82be50d3b1\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.256694 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-inventory\") pod \"777a7bbd-ba32-4b20-a263-de82be50d3b1\" (UID: \"777a7bbd-ba32-4b20-a263-de82be50d3b1\") " Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.262454 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-ceph" (OuterVolumeSpecName: "ceph") pod "777a7bbd-ba32-4b20-a263-de82be50d3b1" (UID: "777a7bbd-ba32-4b20-a263-de82be50d3b1"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.262810 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/777a7bbd-ba32-4b20-a263-de82be50d3b1-kube-api-access-kphwj" (OuterVolumeSpecName: "kube-api-access-kphwj") pod "777a7bbd-ba32-4b20-a263-de82be50d3b1" (UID: "777a7bbd-ba32-4b20-a263-de82be50d3b1"). InnerVolumeSpecName "kube-api-access-kphwj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.264786 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "777a7bbd-ba32-4b20-a263-de82be50d3b1" (UID: "777a7bbd-ba32-4b20-a263-de82be50d3b1"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.282888 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "777a7bbd-ba32-4b20-a263-de82be50d3b1" (UID: "777a7bbd-ba32-4b20-a263-de82be50d3b1"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.283820 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-inventory" (OuterVolumeSpecName: "inventory") pod "777a7bbd-ba32-4b20-a263-de82be50d3b1" (UID: "777a7bbd-ba32-4b20-a263-de82be50d3b1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.294181 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "777a7bbd-ba32-4b20-a263-de82be50d3b1" (UID: "777a7bbd-ba32-4b20-a263-de82be50d3b1"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.306439 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "777a7bbd-ba32-4b20-a263-de82be50d3b1" (UID: "777a7bbd-ba32-4b20-a263-de82be50d3b1"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.359925 4838 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.359958 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.359971 4838 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.359982 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kphwj\" (UniqueName: \"kubernetes.io/projected/777a7bbd-ba32-4b20-a263-de82be50d3b1-kube-api-access-kphwj\") on node \"crc\" DevicePath \"\"" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.359991 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.360000 4838 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.360009 4838 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/777a7bbd-ba32-4b20-a263-de82be50d3b1-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.621071 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" event={"ID":"777a7bbd-ba32-4b20-a263-de82be50d3b1","Type":"ContainerDied","Data":"e646679dfe0714e3a520c6ea24854930f77718b610a8bef67ab609a98b341b18"} Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.621128 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e646679dfe0714e3a520c6ea24854930f77718b610a8bef67ab609a98b341b18" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.621209 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.750568 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p"] Nov 28 10:42:36 crc kubenswrapper[4838]: E1128 10:42:36.751070 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="777a7bbd-ba32-4b20-a263-de82be50d3b1" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.751087 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="777a7bbd-ba32-4b20-a263-de82be50d3b1" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 28 10:42:36 crc kubenswrapper[4838]: E1128 10:42:36.751103 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="444518cd-8fef-438c-953c-7534997b068d" containerName="extract-utilities" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.751111 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="444518cd-8fef-438c-953c-7534997b068d" containerName="extract-utilities" Nov 28 10:42:36 crc kubenswrapper[4838]: E1128 10:42:36.751129 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="444518cd-8fef-438c-953c-7534997b068d" containerName="extract-content" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.751135 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="444518cd-8fef-438c-953c-7534997b068d" containerName="extract-content" Nov 28 10:42:36 crc kubenswrapper[4838]: E1128 10:42:36.751160 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="444518cd-8fef-438c-953c-7534997b068d" containerName="registry-server" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.751165 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="444518cd-8fef-438c-953c-7534997b068d" containerName="registry-server" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.751310 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="444518cd-8fef-438c-953c-7534997b068d" containerName="registry-server" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.751324 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="777a7bbd-ba32-4b20-a263-de82be50d3b1" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.752028 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.755696 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.755781 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.755812 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.755921 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.756134 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.758524 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.768330 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p"] Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.871221 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8958p\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.871784 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8958p\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.872304 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kr42l\" (UniqueName: \"kubernetes.io/projected/d861b650-a017-43fc-8da3-b65d8f9e8ce8-kube-api-access-kr42l\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8958p\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.872510 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8958p\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.872695 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8958p\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" Nov 28 10:42:36 crc 
kubenswrapper[4838]: I1128 10:42:36.873036 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8958p\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.975544 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8958p\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.975609 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8958p\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.975673 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kr42l\" (UniqueName: \"kubernetes.io/projected/d861b650-a017-43fc-8da3-b65d8f9e8ce8-kube-api-access-kr42l\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8958p\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.975703 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8958p\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.975804 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8958p\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.976265 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8958p\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.979391 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8958p\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.979753 4838 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8958p\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.980549 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8958p\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.982602 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8958p\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.986520 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8958p\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" Nov 28 10:42:36 crc kubenswrapper[4838]: I1128 10:42:36.993265 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kr42l\" (UniqueName: \"kubernetes.io/projected/d861b650-a017-43fc-8da3-b65d8f9e8ce8-kube-api-access-kr42l\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8958p\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" Nov 28 10:42:37 crc kubenswrapper[4838]: I1128 10:42:37.080419 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" Nov 28 10:42:37 crc kubenswrapper[4838]: I1128 10:42:37.471441 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p"] Nov 28 10:42:37 crc kubenswrapper[4838]: I1128 10:42:37.562930 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7" Nov 28 10:42:37 crc kubenswrapper[4838]: E1128 10:42:37.563304 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:42:37 crc kubenswrapper[4838]: I1128 10:42:37.631374 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" event={"ID":"d861b650-a017-43fc-8da3-b65d8f9e8ce8","Type":"ContainerStarted","Data":"e95d01f1e810b4fc99a2682971c4bce11be8d60569532b0dbe3a6836f53c3896"} Nov 28 10:42:38 crc kubenswrapper[4838]: I1128 10:42:38.643929 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" event={"ID":"d861b650-a017-43fc-8da3-b65d8f9e8ce8","Type":"ContainerStarted","Data":"594517f1ab7c7c66ad576c9682b5e48c1e2a57fad013ad72bc60205349f7f88d"} Nov 28 10:42:38 crc kubenswrapper[4838]: I1128 10:42:38.682481 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" podStartSLOduration=2.112768101 podStartE2EDuration="2.68245213s" podCreationTimestamp="2025-11-28 10:42:36 +0000 UTC" firstStartedPulling="2025-11-28 10:42:37.48016893 +0000 UTC m=+2729.179143110" lastFinishedPulling="2025-11-28 10:42:38.049852929 +0000 UTC m=+2729.748827139" observedRunningTime="2025-11-28 10:42:38.664153582 +0000 UTC m=+2730.363127752" watchObservedRunningTime="2025-11-28 10:42:38.68245213 +0000 UTC m=+2730.381426340" Nov 28 10:42:52 crc kubenswrapper[4838]: I1128 10:42:52.562503 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7" Nov 28 10:42:52 crc kubenswrapper[4838]: E1128 10:42:52.563593 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:43:07 crc kubenswrapper[4838]: I1128 10:43:07.562682 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7" Nov 28 10:43:08 crc kubenswrapper[4838]: I1128 10:43:08.976100 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerStarted","Data":"c3bd2f09ef29e69880ca8ba24161da6d816f538d277f4ad3c4e163e96b582c98"} Nov 28 10:44:33 crc kubenswrapper[4838]: I1128 10:44:33.358363 4838 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openshift-marketplace/redhat-marketplace-q42cg"] Nov 28 10:44:33 crc kubenswrapper[4838]: I1128 10:44:33.365424 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q42cg" Nov 28 10:44:33 crc kubenswrapper[4838]: I1128 10:44:33.380349 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-q42cg"] Nov 28 10:44:33 crc kubenswrapper[4838]: I1128 10:44:33.492928 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35af3dcf-1be5-49e1-92a5-3c7110e7afef-catalog-content\") pod \"redhat-marketplace-q42cg\" (UID: \"35af3dcf-1be5-49e1-92a5-3c7110e7afef\") " pod="openshift-marketplace/redhat-marketplace-q42cg" Nov 28 10:44:33 crc kubenswrapper[4838]: I1128 10:44:33.493077 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35af3dcf-1be5-49e1-92a5-3c7110e7afef-utilities\") pod \"redhat-marketplace-q42cg\" (UID: \"35af3dcf-1be5-49e1-92a5-3c7110e7afef\") " pod="openshift-marketplace/redhat-marketplace-q42cg" Nov 28 10:44:33 crc kubenswrapper[4838]: I1128 10:44:33.493113 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6qz77\" (UniqueName: \"kubernetes.io/projected/35af3dcf-1be5-49e1-92a5-3c7110e7afef-kube-api-access-6qz77\") pod \"redhat-marketplace-q42cg\" (UID: \"35af3dcf-1be5-49e1-92a5-3c7110e7afef\") " pod="openshift-marketplace/redhat-marketplace-q42cg" Nov 28 10:44:33 crc kubenswrapper[4838]: I1128 10:44:33.594786 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6qz77\" (UniqueName: \"kubernetes.io/projected/35af3dcf-1be5-49e1-92a5-3c7110e7afef-kube-api-access-6qz77\") pod \"redhat-marketplace-q42cg\" (UID: \"35af3dcf-1be5-49e1-92a5-3c7110e7afef\") " pod="openshift-marketplace/redhat-marketplace-q42cg" Nov 28 10:44:33 crc kubenswrapper[4838]: I1128 10:44:33.594936 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35af3dcf-1be5-49e1-92a5-3c7110e7afef-catalog-content\") pod \"redhat-marketplace-q42cg\" (UID: \"35af3dcf-1be5-49e1-92a5-3c7110e7afef\") " pod="openshift-marketplace/redhat-marketplace-q42cg" Nov 28 10:44:33 crc kubenswrapper[4838]: I1128 10:44:33.595176 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35af3dcf-1be5-49e1-92a5-3c7110e7afef-utilities\") pod \"redhat-marketplace-q42cg\" (UID: \"35af3dcf-1be5-49e1-92a5-3c7110e7afef\") " pod="openshift-marketplace/redhat-marketplace-q42cg" Nov 28 10:44:33 crc kubenswrapper[4838]: I1128 10:44:33.596300 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35af3dcf-1be5-49e1-92a5-3c7110e7afef-catalog-content\") pod \"redhat-marketplace-q42cg\" (UID: \"35af3dcf-1be5-49e1-92a5-3c7110e7afef\") " pod="openshift-marketplace/redhat-marketplace-q42cg" Nov 28 10:44:33 crc kubenswrapper[4838]: I1128 10:44:33.596527 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35af3dcf-1be5-49e1-92a5-3c7110e7afef-utilities\") pod \"redhat-marketplace-q42cg\" (UID: 
\"35af3dcf-1be5-49e1-92a5-3c7110e7afef\") " pod="openshift-marketplace/redhat-marketplace-q42cg" Nov 28 10:44:33 crc kubenswrapper[4838]: I1128 10:44:33.627481 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6qz77\" (UniqueName: \"kubernetes.io/projected/35af3dcf-1be5-49e1-92a5-3c7110e7afef-kube-api-access-6qz77\") pod \"redhat-marketplace-q42cg\" (UID: \"35af3dcf-1be5-49e1-92a5-3c7110e7afef\") " pod="openshift-marketplace/redhat-marketplace-q42cg" Nov 28 10:44:33 crc kubenswrapper[4838]: I1128 10:44:33.699559 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q42cg" Nov 28 10:44:34 crc kubenswrapper[4838]: I1128 10:44:34.236849 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-q42cg"] Nov 28 10:44:34 crc kubenswrapper[4838]: I1128 10:44:34.897595 4838 generic.go:334] "Generic (PLEG): container finished" podID="35af3dcf-1be5-49e1-92a5-3c7110e7afef" containerID="2bf31e6d910f086caa7b8c388b4d4e540687d93be7acce28017261cf4e8dab07" exitCode=0 Nov 28 10:44:34 crc kubenswrapper[4838]: I1128 10:44:34.897660 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q42cg" event={"ID":"35af3dcf-1be5-49e1-92a5-3c7110e7afef","Type":"ContainerDied","Data":"2bf31e6d910f086caa7b8c388b4d4e540687d93be7acce28017261cf4e8dab07"} Nov 28 10:44:34 crc kubenswrapper[4838]: I1128 10:44:34.897702 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q42cg" event={"ID":"35af3dcf-1be5-49e1-92a5-3c7110e7afef","Type":"ContainerStarted","Data":"5418c2e5be8cd98c6bb74ace17079b89734d15d16e94230d5eedbb69cf516c82"} Nov 28 10:44:36 crc kubenswrapper[4838]: I1128 10:44:36.928532 4838 generic.go:334] "Generic (PLEG): container finished" podID="35af3dcf-1be5-49e1-92a5-3c7110e7afef" containerID="5b3b4c1bf9b246d80907be66c41339d22a0c0263eae10266d47171577358da79" exitCode=0 Nov 28 10:44:36 crc kubenswrapper[4838]: I1128 10:44:36.928624 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q42cg" event={"ID":"35af3dcf-1be5-49e1-92a5-3c7110e7afef","Type":"ContainerDied","Data":"5b3b4c1bf9b246d80907be66c41339d22a0c0263eae10266d47171577358da79"} Nov 28 10:44:38 crc kubenswrapper[4838]: I1128 10:44:38.955355 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q42cg" event={"ID":"35af3dcf-1be5-49e1-92a5-3c7110e7afef","Type":"ContainerStarted","Data":"5dc0743c410d36d7adae4905e3d9b8e856bdcb1ad29c106c692bd7f059405a6b"} Nov 28 10:44:38 crc kubenswrapper[4838]: I1128 10:44:38.981908 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-q42cg" podStartSLOduration=3.121628834 podStartE2EDuration="5.981891426s" podCreationTimestamp="2025-11-28 10:44:33 +0000 UTC" firstStartedPulling="2025-11-28 10:44:34.902915066 +0000 UTC m=+2846.601889276" lastFinishedPulling="2025-11-28 10:44:37.763177648 +0000 UTC m=+2849.462151868" observedRunningTime="2025-11-28 10:44:38.980246031 +0000 UTC m=+2850.679220211" watchObservedRunningTime="2025-11-28 10:44:38.981891426 +0000 UTC m=+2850.680865596" Nov 28 10:44:43 crc kubenswrapper[4838]: I1128 10:44:43.700836 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-q42cg" Nov 28 10:44:43 crc kubenswrapper[4838]: I1128 10:44:43.703577 4838 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-q42cg" Nov 28 10:44:43 crc kubenswrapper[4838]: I1128 10:44:43.767910 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-q42cg" Nov 28 10:44:44 crc kubenswrapper[4838]: I1128 10:44:44.463000 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-q42cg" Nov 28 10:44:44 crc kubenswrapper[4838]: I1128 10:44:44.528327 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-q42cg"] Nov 28 10:44:46 crc kubenswrapper[4838]: I1128 10:44:46.424951 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-q42cg" podUID="35af3dcf-1be5-49e1-92a5-3c7110e7afef" containerName="registry-server" containerID="cri-o://5dc0743c410d36d7adae4905e3d9b8e856bdcb1ad29c106c692bd7f059405a6b" gracePeriod=2 Nov 28 10:44:47 crc kubenswrapper[4838]: I1128 10:44:47.440494 4838 generic.go:334] "Generic (PLEG): container finished" podID="35af3dcf-1be5-49e1-92a5-3c7110e7afef" containerID="5dc0743c410d36d7adae4905e3d9b8e856bdcb1ad29c106c692bd7f059405a6b" exitCode=0 Nov 28 10:44:47 crc kubenswrapper[4838]: I1128 10:44:47.440535 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q42cg" event={"ID":"35af3dcf-1be5-49e1-92a5-3c7110e7afef","Type":"ContainerDied","Data":"5dc0743c410d36d7adae4905e3d9b8e856bdcb1ad29c106c692bd7f059405a6b"} Nov 28 10:44:47 crc kubenswrapper[4838]: I1128 10:44:47.569345 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q42cg" Nov 28 10:44:47 crc kubenswrapper[4838]: I1128 10:44:47.719187 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35af3dcf-1be5-49e1-92a5-3c7110e7afef-utilities\") pod \"35af3dcf-1be5-49e1-92a5-3c7110e7afef\" (UID: \"35af3dcf-1be5-49e1-92a5-3c7110e7afef\") " Nov 28 10:44:47 crc kubenswrapper[4838]: I1128 10:44:47.719231 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6qz77\" (UniqueName: \"kubernetes.io/projected/35af3dcf-1be5-49e1-92a5-3c7110e7afef-kube-api-access-6qz77\") pod \"35af3dcf-1be5-49e1-92a5-3c7110e7afef\" (UID: \"35af3dcf-1be5-49e1-92a5-3c7110e7afef\") " Nov 28 10:44:47 crc kubenswrapper[4838]: I1128 10:44:47.719362 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35af3dcf-1be5-49e1-92a5-3c7110e7afef-catalog-content\") pod \"35af3dcf-1be5-49e1-92a5-3c7110e7afef\" (UID: \"35af3dcf-1be5-49e1-92a5-3c7110e7afef\") " Nov 28 10:44:47 crc kubenswrapper[4838]: I1128 10:44:47.720467 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35af3dcf-1be5-49e1-92a5-3c7110e7afef-utilities" (OuterVolumeSpecName: "utilities") pod "35af3dcf-1be5-49e1-92a5-3c7110e7afef" (UID: "35af3dcf-1be5-49e1-92a5-3c7110e7afef"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:44:47 crc kubenswrapper[4838]: I1128 10:44:47.728891 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35af3dcf-1be5-49e1-92a5-3c7110e7afef-kube-api-access-6qz77" (OuterVolumeSpecName: "kube-api-access-6qz77") pod "35af3dcf-1be5-49e1-92a5-3c7110e7afef" (UID: "35af3dcf-1be5-49e1-92a5-3c7110e7afef"). InnerVolumeSpecName "kube-api-access-6qz77". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:44:47 crc kubenswrapper[4838]: I1128 10:44:47.737921 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35af3dcf-1be5-49e1-92a5-3c7110e7afef-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "35af3dcf-1be5-49e1-92a5-3c7110e7afef" (UID: "35af3dcf-1be5-49e1-92a5-3c7110e7afef"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:44:47 crc kubenswrapper[4838]: I1128 10:44:47.820841 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35af3dcf-1be5-49e1-92a5-3c7110e7afef-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:44:47 crc kubenswrapper[4838]: I1128 10:44:47.820871 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35af3dcf-1be5-49e1-92a5-3c7110e7afef-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:44:47 crc kubenswrapper[4838]: I1128 10:44:47.820881 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6qz77\" (UniqueName: \"kubernetes.io/projected/35af3dcf-1be5-49e1-92a5-3c7110e7afef-kube-api-access-6qz77\") on node \"crc\" DevicePath \"\"" Nov 28 10:44:48 crc kubenswrapper[4838]: I1128 10:44:48.452162 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q42cg" event={"ID":"35af3dcf-1be5-49e1-92a5-3c7110e7afef","Type":"ContainerDied","Data":"5418c2e5be8cd98c6bb74ace17079b89734d15d16e94230d5eedbb69cf516c82"} Nov 28 10:44:48 crc kubenswrapper[4838]: I1128 10:44:48.452214 4838 scope.go:117] "RemoveContainer" containerID="5dc0743c410d36d7adae4905e3d9b8e856bdcb1ad29c106c692bd7f059405a6b" Nov 28 10:44:48 crc kubenswrapper[4838]: I1128 10:44:48.452247 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q42cg" Nov 28 10:44:48 crc kubenswrapper[4838]: I1128 10:44:48.479191 4838 scope.go:117] "RemoveContainer" containerID="5b3b4c1bf9b246d80907be66c41339d22a0c0263eae10266d47171577358da79" Nov 28 10:44:48 crc kubenswrapper[4838]: I1128 10:44:48.495224 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-q42cg"] Nov 28 10:44:48 crc kubenswrapper[4838]: I1128 10:44:48.503051 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-q42cg"] Nov 28 10:44:48 crc kubenswrapper[4838]: I1128 10:44:48.523186 4838 scope.go:117] "RemoveContainer" containerID="2bf31e6d910f086caa7b8c388b4d4e540687d93be7acce28017261cf4e8dab07" Nov 28 10:44:48 crc kubenswrapper[4838]: I1128 10:44:48.580948 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35af3dcf-1be5-49e1-92a5-3c7110e7afef" path="/var/lib/kubelet/pods/35af3dcf-1be5-49e1-92a5-3c7110e7afef/volumes" Nov 28 10:45:00 crc kubenswrapper[4838]: I1128 10:45:00.191017 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405445-x7q6m"] Nov 28 10:45:00 crc kubenswrapper[4838]: E1128 10:45:00.192433 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35af3dcf-1be5-49e1-92a5-3c7110e7afef" containerName="extract-content" Nov 28 10:45:00 crc kubenswrapper[4838]: I1128 10:45:00.192531 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="35af3dcf-1be5-49e1-92a5-3c7110e7afef" containerName="extract-content" Nov 28 10:45:00 crc kubenswrapper[4838]: E1128 10:45:00.192946 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35af3dcf-1be5-49e1-92a5-3c7110e7afef" containerName="registry-server" Nov 28 10:45:00 crc kubenswrapper[4838]: I1128 10:45:00.192970 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="35af3dcf-1be5-49e1-92a5-3c7110e7afef" containerName="registry-server" Nov 28 10:45:00 crc kubenswrapper[4838]: E1128 10:45:00.192986 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35af3dcf-1be5-49e1-92a5-3c7110e7afef" containerName="extract-utilities" Nov 28 10:45:00 crc kubenswrapper[4838]: I1128 10:45:00.192994 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="35af3dcf-1be5-49e1-92a5-3c7110e7afef" containerName="extract-utilities" Nov 28 10:45:00 crc kubenswrapper[4838]: I1128 10:45:00.193227 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="35af3dcf-1be5-49e1-92a5-3c7110e7afef" containerName="registry-server" Nov 28 10:45:00 crc kubenswrapper[4838]: I1128 10:45:00.194139 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405445-x7q6m" Nov 28 10:45:00 crc kubenswrapper[4838]: I1128 10:45:00.195670 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 10:45:00 crc kubenswrapper[4838]: I1128 10:45:00.196003 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 10:45:00 crc kubenswrapper[4838]: I1128 10:45:00.202946 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405445-x7q6m"] Nov 28 10:45:00 crc kubenswrapper[4838]: I1128 10:45:00.267500 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dzwk\" (UniqueName: \"kubernetes.io/projected/7eb9ce71-f522-46c8-a29e-4654c6c66a4a-kube-api-access-7dzwk\") pod \"collect-profiles-29405445-x7q6m\" (UID: \"7eb9ce71-f522-46c8-a29e-4654c6c66a4a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405445-x7q6m" Nov 28 10:45:00 crc kubenswrapper[4838]: I1128 10:45:00.267592 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7eb9ce71-f522-46c8-a29e-4654c6c66a4a-config-volume\") pod \"collect-profiles-29405445-x7q6m\" (UID: \"7eb9ce71-f522-46c8-a29e-4654c6c66a4a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405445-x7q6m" Nov 28 10:45:00 crc kubenswrapper[4838]: I1128 10:45:00.267667 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7eb9ce71-f522-46c8-a29e-4654c6c66a4a-secret-volume\") pod \"collect-profiles-29405445-x7q6m\" (UID: \"7eb9ce71-f522-46c8-a29e-4654c6c66a4a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405445-x7q6m" Nov 28 10:45:00 crc kubenswrapper[4838]: I1128 10:45:00.369123 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dzwk\" (UniqueName: \"kubernetes.io/projected/7eb9ce71-f522-46c8-a29e-4654c6c66a4a-kube-api-access-7dzwk\") pod \"collect-profiles-29405445-x7q6m\" (UID: \"7eb9ce71-f522-46c8-a29e-4654c6c66a4a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405445-x7q6m" Nov 28 10:45:00 crc kubenswrapper[4838]: I1128 10:45:00.369398 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7eb9ce71-f522-46c8-a29e-4654c6c66a4a-config-volume\") pod \"collect-profiles-29405445-x7q6m\" (UID: \"7eb9ce71-f522-46c8-a29e-4654c6c66a4a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405445-x7q6m" Nov 28 10:45:00 crc kubenswrapper[4838]: I1128 10:45:00.369440 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7eb9ce71-f522-46c8-a29e-4654c6c66a4a-secret-volume\") pod \"collect-profiles-29405445-x7q6m\" (UID: \"7eb9ce71-f522-46c8-a29e-4654c6c66a4a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405445-x7q6m" Nov 28 10:45:00 crc kubenswrapper[4838]: I1128 10:45:00.370790 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7eb9ce71-f522-46c8-a29e-4654c6c66a4a-config-volume\") pod 
\"collect-profiles-29405445-x7q6m\" (UID: \"7eb9ce71-f522-46c8-a29e-4654c6c66a4a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405445-x7q6m" Nov 28 10:45:00 crc kubenswrapper[4838]: I1128 10:45:00.382654 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7eb9ce71-f522-46c8-a29e-4654c6c66a4a-secret-volume\") pod \"collect-profiles-29405445-x7q6m\" (UID: \"7eb9ce71-f522-46c8-a29e-4654c6c66a4a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405445-x7q6m" Nov 28 10:45:00 crc kubenswrapper[4838]: I1128 10:45:00.407035 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dzwk\" (UniqueName: \"kubernetes.io/projected/7eb9ce71-f522-46c8-a29e-4654c6c66a4a-kube-api-access-7dzwk\") pod \"collect-profiles-29405445-x7q6m\" (UID: \"7eb9ce71-f522-46c8-a29e-4654c6c66a4a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405445-x7q6m" Nov 28 10:45:00 crc kubenswrapper[4838]: I1128 10:45:00.517292 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405445-x7q6m" Nov 28 10:45:00 crc kubenswrapper[4838]: I1128 10:45:00.793950 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405445-x7q6m"] Nov 28 10:45:00 crc kubenswrapper[4838]: W1128 10:45:00.831418 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7eb9ce71_f522_46c8_a29e_4654c6c66a4a.slice/crio-e45f2b1f762d577040a2e3a7427bd86a3b27507541802bfffaf3c8d220fd16a0 WatchSource:0}: Error finding container e45f2b1f762d577040a2e3a7427bd86a3b27507541802bfffaf3c8d220fd16a0: Status 404 returned error can't find the container with id e45f2b1f762d577040a2e3a7427bd86a3b27507541802bfffaf3c8d220fd16a0 Nov 28 10:45:01 crc kubenswrapper[4838]: I1128 10:45:01.623137 4838 generic.go:334] "Generic (PLEG): container finished" podID="7eb9ce71-f522-46c8-a29e-4654c6c66a4a" containerID="306759f30aa1daae486d3a0c8ab145d388afac7145bfea98386f9fdf74cb5cae" exitCode=0 Nov 28 10:45:01 crc kubenswrapper[4838]: I1128 10:45:01.623206 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405445-x7q6m" event={"ID":"7eb9ce71-f522-46c8-a29e-4654c6c66a4a","Type":"ContainerDied","Data":"306759f30aa1daae486d3a0c8ab145d388afac7145bfea98386f9fdf74cb5cae"} Nov 28 10:45:01 crc kubenswrapper[4838]: I1128 10:45:01.623405 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405445-x7q6m" event={"ID":"7eb9ce71-f522-46c8-a29e-4654c6c66a4a","Type":"ContainerStarted","Data":"e45f2b1f762d577040a2e3a7427bd86a3b27507541802bfffaf3c8d220fd16a0"} Nov 28 10:45:03 crc kubenswrapper[4838]: I1128 10:45:03.011195 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405445-x7q6m" Nov 28 10:45:03 crc kubenswrapper[4838]: I1128 10:45:03.120947 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7dzwk\" (UniqueName: \"kubernetes.io/projected/7eb9ce71-f522-46c8-a29e-4654c6c66a4a-kube-api-access-7dzwk\") pod \"7eb9ce71-f522-46c8-a29e-4654c6c66a4a\" (UID: \"7eb9ce71-f522-46c8-a29e-4654c6c66a4a\") " Nov 28 10:45:03 crc kubenswrapper[4838]: I1128 10:45:03.121023 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7eb9ce71-f522-46c8-a29e-4654c6c66a4a-config-volume\") pod \"7eb9ce71-f522-46c8-a29e-4654c6c66a4a\" (UID: \"7eb9ce71-f522-46c8-a29e-4654c6c66a4a\") " Nov 28 10:45:03 crc kubenswrapper[4838]: I1128 10:45:03.121108 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7eb9ce71-f522-46c8-a29e-4654c6c66a4a-secret-volume\") pod \"7eb9ce71-f522-46c8-a29e-4654c6c66a4a\" (UID: \"7eb9ce71-f522-46c8-a29e-4654c6c66a4a\") " Nov 28 10:45:03 crc kubenswrapper[4838]: I1128 10:45:03.123253 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7eb9ce71-f522-46c8-a29e-4654c6c66a4a-config-volume" (OuterVolumeSpecName: "config-volume") pod "7eb9ce71-f522-46c8-a29e-4654c6c66a4a" (UID: "7eb9ce71-f522-46c8-a29e-4654c6c66a4a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:45:03 crc kubenswrapper[4838]: I1128 10:45:03.135626 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7eb9ce71-f522-46c8-a29e-4654c6c66a4a-kube-api-access-7dzwk" (OuterVolumeSpecName: "kube-api-access-7dzwk") pod "7eb9ce71-f522-46c8-a29e-4654c6c66a4a" (UID: "7eb9ce71-f522-46c8-a29e-4654c6c66a4a"). InnerVolumeSpecName "kube-api-access-7dzwk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:45:03 crc kubenswrapper[4838]: I1128 10:45:03.137952 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7eb9ce71-f522-46c8-a29e-4654c6c66a4a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "7eb9ce71-f522-46c8-a29e-4654c6c66a4a" (UID: "7eb9ce71-f522-46c8-a29e-4654c6c66a4a"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:45:03 crc kubenswrapper[4838]: I1128 10:45:03.223179 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7dzwk\" (UniqueName: \"kubernetes.io/projected/7eb9ce71-f522-46c8-a29e-4654c6c66a4a-kube-api-access-7dzwk\") on node \"crc\" DevicePath \"\"" Nov 28 10:45:03 crc kubenswrapper[4838]: I1128 10:45:03.223230 4838 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7eb9ce71-f522-46c8-a29e-4654c6c66a4a-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 10:45:03 crc kubenswrapper[4838]: I1128 10:45:03.223250 4838 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7eb9ce71-f522-46c8-a29e-4654c6c66a4a-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 10:45:03 crc kubenswrapper[4838]: I1128 10:45:03.648782 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405445-x7q6m" event={"ID":"7eb9ce71-f522-46c8-a29e-4654c6c66a4a","Type":"ContainerDied","Data":"e45f2b1f762d577040a2e3a7427bd86a3b27507541802bfffaf3c8d220fd16a0"} Nov 28 10:45:03 crc kubenswrapper[4838]: I1128 10:45:03.648854 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e45f2b1f762d577040a2e3a7427bd86a3b27507541802bfffaf3c8d220fd16a0" Nov 28 10:45:03 crc kubenswrapper[4838]: I1128 10:45:03.649396 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405445-x7q6m" Nov 28 10:45:04 crc kubenswrapper[4838]: I1128 10:45:04.120479 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp"] Nov 28 10:45:04 crc kubenswrapper[4838]: I1128 10:45:04.135026 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405400-6d4qp"] Nov 28 10:45:04 crc kubenswrapper[4838]: I1128 10:45:04.581196 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89ca4c63-f97b-4a71-8b2e-613170fdde6b" path="/var/lib/kubelet/pods/89ca4c63-f97b-4a71-8b2e-613170fdde6b/volumes" Nov 28 10:45:18 crc kubenswrapper[4838]: I1128 10:45:18.683613 4838 scope.go:117] "RemoveContainer" containerID="b99401cb44d9c0221a17a49233459c6b66892d5c9d6f4fa120c97998fd551abe" Nov 28 10:45:23 crc kubenswrapper[4838]: I1128 10:45:23.939878 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:45:23 crc kubenswrapper[4838]: I1128 10:45:23.940379 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:45:53 crc kubenswrapper[4838]: I1128 10:45:53.939788 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Nov 28 10:45:53 crc kubenswrapper[4838]: I1128 10:45:53.940783 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:45:54 crc kubenswrapper[4838]: I1128 10:45:54.637745 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6kktk"] Nov 28 10:45:54 crc kubenswrapper[4838]: E1128 10:45:54.638171 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7eb9ce71-f522-46c8-a29e-4654c6c66a4a" containerName="collect-profiles" Nov 28 10:45:54 crc kubenswrapper[4838]: I1128 10:45:54.638188 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="7eb9ce71-f522-46c8-a29e-4654c6c66a4a" containerName="collect-profiles" Nov 28 10:45:54 crc kubenswrapper[4838]: I1128 10:45:54.638491 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="7eb9ce71-f522-46c8-a29e-4654c6c66a4a" containerName="collect-profiles" Nov 28 10:45:54 crc kubenswrapper[4838]: I1128 10:45:54.640549 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6kktk" Nov 28 10:45:54 crc kubenswrapper[4838]: I1128 10:45:54.658646 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6kktk"] Nov 28 10:45:54 crc kubenswrapper[4838]: I1128 10:45:54.738823 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnjhg\" (UniqueName: \"kubernetes.io/projected/5557af77-f9d9-42b3-a2fd-71e0632bc567-kube-api-access-gnjhg\") pod \"community-operators-6kktk\" (UID: \"5557af77-f9d9-42b3-a2fd-71e0632bc567\") " pod="openshift-marketplace/community-operators-6kktk" Nov 28 10:45:54 crc kubenswrapper[4838]: I1128 10:45:54.738901 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5557af77-f9d9-42b3-a2fd-71e0632bc567-catalog-content\") pod \"community-operators-6kktk\" (UID: \"5557af77-f9d9-42b3-a2fd-71e0632bc567\") " pod="openshift-marketplace/community-operators-6kktk" Nov 28 10:45:54 crc kubenswrapper[4838]: I1128 10:45:54.738970 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5557af77-f9d9-42b3-a2fd-71e0632bc567-utilities\") pod \"community-operators-6kktk\" (UID: \"5557af77-f9d9-42b3-a2fd-71e0632bc567\") " pod="openshift-marketplace/community-operators-6kktk" Nov 28 10:45:54 crc kubenswrapper[4838]: I1128 10:45:54.841111 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5557af77-f9d9-42b3-a2fd-71e0632bc567-catalog-content\") pod \"community-operators-6kktk\" (UID: \"5557af77-f9d9-42b3-a2fd-71e0632bc567\") " pod="openshift-marketplace/community-operators-6kktk" Nov 28 10:45:54 crc kubenswrapper[4838]: I1128 10:45:54.841183 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5557af77-f9d9-42b3-a2fd-71e0632bc567-utilities\") pod \"community-operators-6kktk\" (UID: \"5557af77-f9d9-42b3-a2fd-71e0632bc567\") " 
pod="openshift-marketplace/community-operators-6kktk" Nov 28 10:45:54 crc kubenswrapper[4838]: I1128 10:45:54.841351 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnjhg\" (UniqueName: \"kubernetes.io/projected/5557af77-f9d9-42b3-a2fd-71e0632bc567-kube-api-access-gnjhg\") pod \"community-operators-6kktk\" (UID: \"5557af77-f9d9-42b3-a2fd-71e0632bc567\") " pod="openshift-marketplace/community-operators-6kktk" Nov 28 10:45:54 crc kubenswrapper[4838]: I1128 10:45:54.841976 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5557af77-f9d9-42b3-a2fd-71e0632bc567-utilities\") pod \"community-operators-6kktk\" (UID: \"5557af77-f9d9-42b3-a2fd-71e0632bc567\") " pod="openshift-marketplace/community-operators-6kktk" Nov 28 10:45:54 crc kubenswrapper[4838]: I1128 10:45:54.842263 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5557af77-f9d9-42b3-a2fd-71e0632bc567-catalog-content\") pod \"community-operators-6kktk\" (UID: \"5557af77-f9d9-42b3-a2fd-71e0632bc567\") " pod="openshift-marketplace/community-operators-6kktk" Nov 28 10:45:54 crc kubenswrapper[4838]: I1128 10:45:54.867041 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnjhg\" (UniqueName: \"kubernetes.io/projected/5557af77-f9d9-42b3-a2fd-71e0632bc567-kube-api-access-gnjhg\") pod \"community-operators-6kktk\" (UID: \"5557af77-f9d9-42b3-a2fd-71e0632bc567\") " pod="openshift-marketplace/community-operators-6kktk" Nov 28 10:45:54 crc kubenswrapper[4838]: I1128 10:45:54.992056 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6kktk" Nov 28 10:45:55 crc kubenswrapper[4838]: I1128 10:45:55.548353 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6kktk"] Nov 28 10:45:56 crc kubenswrapper[4838]: I1128 10:45:56.238278 4838 generic.go:334] "Generic (PLEG): container finished" podID="5557af77-f9d9-42b3-a2fd-71e0632bc567" containerID="efcb7fb36498a498901db499fb7c2a695fb0923a6ef34391f71c7c4ffc7a367d" exitCode=0 Nov 28 10:45:56 crc kubenswrapper[4838]: I1128 10:45:56.238393 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6kktk" event={"ID":"5557af77-f9d9-42b3-a2fd-71e0632bc567","Type":"ContainerDied","Data":"efcb7fb36498a498901db499fb7c2a695fb0923a6ef34391f71c7c4ffc7a367d"} Nov 28 10:45:56 crc kubenswrapper[4838]: I1128 10:45:56.238612 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6kktk" event={"ID":"5557af77-f9d9-42b3-a2fd-71e0632bc567","Type":"ContainerStarted","Data":"9f14890e1f99818042864ee043920db812a1bdba251eb3d243fd794a3100f6f0"} Nov 28 10:45:57 crc kubenswrapper[4838]: I1128 10:45:57.250619 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6kktk" event={"ID":"5557af77-f9d9-42b3-a2fd-71e0632bc567","Type":"ContainerStarted","Data":"d13a7806a35b07a38eb965878a64169d07f9e7858d26dcf505ad43938d18f09c"} Nov 28 10:45:58 crc kubenswrapper[4838]: I1128 10:45:58.274278 4838 generic.go:334] "Generic (PLEG): container finished" podID="5557af77-f9d9-42b3-a2fd-71e0632bc567" containerID="d13a7806a35b07a38eb965878a64169d07f9e7858d26dcf505ad43938d18f09c" exitCode=0 Nov 28 10:45:58 crc kubenswrapper[4838]: I1128 10:45:58.274655 4838 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6kktk" event={"ID":"5557af77-f9d9-42b3-a2fd-71e0632bc567","Type":"ContainerDied","Data":"d13a7806a35b07a38eb965878a64169d07f9e7858d26dcf505ad43938d18f09c"} Nov 28 10:45:58 crc kubenswrapper[4838]: I1128 10:45:58.821606 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-pf2bq"] Nov 28 10:45:58 crc kubenswrapper[4838]: I1128 10:45:58.825092 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pf2bq" Nov 28 10:45:58 crc kubenswrapper[4838]: I1128 10:45:58.836134 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pf2bq"] Nov 28 10:45:58 crc kubenswrapper[4838]: I1128 10:45:58.932010 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vnks4\" (UniqueName: \"kubernetes.io/projected/3908b5d1-56f5-4fb0-ab2b-10bf81e10642-kube-api-access-vnks4\") pod \"certified-operators-pf2bq\" (UID: \"3908b5d1-56f5-4fb0-ab2b-10bf81e10642\") " pod="openshift-marketplace/certified-operators-pf2bq" Nov 28 10:45:58 crc kubenswrapper[4838]: I1128 10:45:58.932170 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3908b5d1-56f5-4fb0-ab2b-10bf81e10642-catalog-content\") pod \"certified-operators-pf2bq\" (UID: \"3908b5d1-56f5-4fb0-ab2b-10bf81e10642\") " pod="openshift-marketplace/certified-operators-pf2bq" Nov 28 10:45:58 crc kubenswrapper[4838]: I1128 10:45:58.932199 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3908b5d1-56f5-4fb0-ab2b-10bf81e10642-utilities\") pod \"certified-operators-pf2bq\" (UID: \"3908b5d1-56f5-4fb0-ab2b-10bf81e10642\") " pod="openshift-marketplace/certified-operators-pf2bq" Nov 28 10:45:59 crc kubenswrapper[4838]: I1128 10:45:59.034100 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3908b5d1-56f5-4fb0-ab2b-10bf81e10642-catalog-content\") pod \"certified-operators-pf2bq\" (UID: \"3908b5d1-56f5-4fb0-ab2b-10bf81e10642\") " pod="openshift-marketplace/certified-operators-pf2bq" Nov 28 10:45:59 crc kubenswrapper[4838]: I1128 10:45:59.034171 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3908b5d1-56f5-4fb0-ab2b-10bf81e10642-utilities\") pod \"certified-operators-pf2bq\" (UID: \"3908b5d1-56f5-4fb0-ab2b-10bf81e10642\") " pod="openshift-marketplace/certified-operators-pf2bq" Nov 28 10:45:59 crc kubenswrapper[4838]: I1128 10:45:59.034285 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vnks4\" (UniqueName: \"kubernetes.io/projected/3908b5d1-56f5-4fb0-ab2b-10bf81e10642-kube-api-access-vnks4\") pod \"certified-operators-pf2bq\" (UID: \"3908b5d1-56f5-4fb0-ab2b-10bf81e10642\") " pod="openshift-marketplace/certified-operators-pf2bq" Nov 28 10:45:59 crc kubenswrapper[4838]: I1128 10:45:59.034745 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3908b5d1-56f5-4fb0-ab2b-10bf81e10642-catalog-content\") pod \"certified-operators-pf2bq\" (UID: 
\"3908b5d1-56f5-4fb0-ab2b-10bf81e10642\") " pod="openshift-marketplace/certified-operators-pf2bq" Nov 28 10:45:59 crc kubenswrapper[4838]: I1128 10:45:59.034745 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3908b5d1-56f5-4fb0-ab2b-10bf81e10642-utilities\") pod \"certified-operators-pf2bq\" (UID: \"3908b5d1-56f5-4fb0-ab2b-10bf81e10642\") " pod="openshift-marketplace/certified-operators-pf2bq" Nov 28 10:45:59 crc kubenswrapper[4838]: I1128 10:45:59.055069 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vnks4\" (UniqueName: \"kubernetes.io/projected/3908b5d1-56f5-4fb0-ab2b-10bf81e10642-kube-api-access-vnks4\") pod \"certified-operators-pf2bq\" (UID: \"3908b5d1-56f5-4fb0-ab2b-10bf81e10642\") " pod="openshift-marketplace/certified-operators-pf2bq" Nov 28 10:45:59 crc kubenswrapper[4838]: I1128 10:45:59.149080 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pf2bq" Nov 28 10:45:59 crc kubenswrapper[4838]: I1128 10:45:59.322610 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6kktk" event={"ID":"5557af77-f9d9-42b3-a2fd-71e0632bc567","Type":"ContainerStarted","Data":"2b7fdcf29b666f9440cc14693014caf46e129ab3364ad7a137ed92e31439c163"} Nov 28 10:45:59 crc kubenswrapper[4838]: I1128 10:45:59.383328 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6kktk" podStartSLOduration=2.707646696 podStartE2EDuration="5.383274408s" podCreationTimestamp="2025-11-28 10:45:54 +0000 UTC" firstStartedPulling="2025-11-28 10:45:56.240829114 +0000 UTC m=+2927.939803294" lastFinishedPulling="2025-11-28 10:45:58.916456816 +0000 UTC m=+2930.615431006" observedRunningTime="2025-11-28 10:45:59.34104327 +0000 UTC m=+2931.040017440" watchObservedRunningTime="2025-11-28 10:45:59.383274408 +0000 UTC m=+2931.082248588" Nov 28 10:45:59 crc kubenswrapper[4838]: I1128 10:45:59.650652 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pf2bq"] Nov 28 10:46:00 crc kubenswrapper[4838]: I1128 10:46:00.338837 4838 generic.go:334] "Generic (PLEG): container finished" podID="3908b5d1-56f5-4fb0-ab2b-10bf81e10642" containerID="bcb35937263469bd16dd54b716c015c3a04674446b333020ed6953c40ddd778f" exitCode=0 Nov 28 10:46:00 crc kubenswrapper[4838]: I1128 10:46:00.339001 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pf2bq" event={"ID":"3908b5d1-56f5-4fb0-ab2b-10bf81e10642","Type":"ContainerDied","Data":"bcb35937263469bd16dd54b716c015c3a04674446b333020ed6953c40ddd778f"} Nov 28 10:46:00 crc kubenswrapper[4838]: I1128 10:46:00.339271 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pf2bq" event={"ID":"3908b5d1-56f5-4fb0-ab2b-10bf81e10642","Type":"ContainerStarted","Data":"76211d6c9919488a85b83cc835a0c9f0421b7b9b2c6df672f2030eddfa897a1b"} Nov 28 10:46:01 crc kubenswrapper[4838]: I1128 10:46:01.349176 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pf2bq" event={"ID":"3908b5d1-56f5-4fb0-ab2b-10bf81e10642","Type":"ContainerStarted","Data":"63492924efdd11885dc90b498f9fcecbb097265fb6a62ce2024b79895d2a4a15"} Nov 28 10:46:02 crc kubenswrapper[4838]: I1128 10:46:02.363561 4838 generic.go:334] "Generic (PLEG): container finished" 
podID="3908b5d1-56f5-4fb0-ab2b-10bf81e10642" containerID="63492924efdd11885dc90b498f9fcecbb097265fb6a62ce2024b79895d2a4a15" exitCode=0 Nov 28 10:46:02 crc kubenswrapper[4838]: I1128 10:46:02.363632 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pf2bq" event={"ID":"3908b5d1-56f5-4fb0-ab2b-10bf81e10642","Type":"ContainerDied","Data":"63492924efdd11885dc90b498f9fcecbb097265fb6a62ce2024b79895d2a4a15"} Nov 28 10:46:03 crc kubenswrapper[4838]: I1128 10:46:03.379019 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pf2bq" event={"ID":"3908b5d1-56f5-4fb0-ab2b-10bf81e10642","Type":"ContainerStarted","Data":"70ba19c769109a84b482e6fd81e6e764d7579c8f392797b2c384cb127ad7e9f3"} Nov 28 10:46:03 crc kubenswrapper[4838]: I1128 10:46:03.410836 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-pf2bq" podStartSLOduration=2.74295283 podStartE2EDuration="5.41080412s" podCreationTimestamp="2025-11-28 10:45:58 +0000 UTC" firstStartedPulling="2025-11-28 10:46:00.341688119 +0000 UTC m=+2932.040662289" lastFinishedPulling="2025-11-28 10:46:03.009539369 +0000 UTC m=+2934.708513579" observedRunningTime="2025-11-28 10:46:03.406238536 +0000 UTC m=+2935.105212746" watchObservedRunningTime="2025-11-28 10:46:03.41080412 +0000 UTC m=+2935.109778330" Nov 28 10:46:04 crc kubenswrapper[4838]: I1128 10:46:04.993157 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6kktk" Nov 28 10:46:04 crc kubenswrapper[4838]: I1128 10:46:04.993999 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6kktk" Nov 28 10:46:05 crc kubenswrapper[4838]: I1128 10:46:05.078981 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6kktk" Nov 28 10:46:05 crc kubenswrapper[4838]: I1128 10:46:05.471248 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6kktk" Nov 28 10:46:06 crc kubenswrapper[4838]: I1128 10:46:06.206644 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6kktk"] Nov 28 10:46:07 crc kubenswrapper[4838]: I1128 10:46:07.423306 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6kktk" podUID="5557af77-f9d9-42b3-a2fd-71e0632bc567" containerName="registry-server" containerID="cri-o://2b7fdcf29b666f9440cc14693014caf46e129ab3364ad7a137ed92e31439c163" gracePeriod=2 Nov 28 10:46:08 crc kubenswrapper[4838]: I1128 10:46:08.436981 4838 generic.go:334] "Generic (PLEG): container finished" podID="5557af77-f9d9-42b3-a2fd-71e0632bc567" containerID="2b7fdcf29b666f9440cc14693014caf46e129ab3364ad7a137ed92e31439c163" exitCode=0 Nov 28 10:46:08 crc kubenswrapper[4838]: I1128 10:46:08.437106 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6kktk" event={"ID":"5557af77-f9d9-42b3-a2fd-71e0632bc567","Type":"ContainerDied","Data":"2b7fdcf29b666f9440cc14693014caf46e129ab3364ad7a137ed92e31439c163"} Nov 28 10:46:08 crc kubenswrapper[4838]: I1128 10:46:08.437347 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6kktk" 
event={"ID":"5557af77-f9d9-42b3-a2fd-71e0632bc567","Type":"ContainerDied","Data":"9f14890e1f99818042864ee043920db812a1bdba251eb3d243fd794a3100f6f0"} Nov 28 10:46:08 crc kubenswrapper[4838]: I1128 10:46:08.437367 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9f14890e1f99818042864ee043920db812a1bdba251eb3d243fd794a3100f6f0" Nov 28 10:46:08 crc kubenswrapper[4838]: I1128 10:46:08.463113 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6kktk" Nov 28 10:46:08 crc kubenswrapper[4838]: I1128 10:46:08.583378 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5557af77-f9d9-42b3-a2fd-71e0632bc567-utilities\") pod \"5557af77-f9d9-42b3-a2fd-71e0632bc567\" (UID: \"5557af77-f9d9-42b3-a2fd-71e0632bc567\") " Nov 28 10:46:08 crc kubenswrapper[4838]: I1128 10:46:08.583653 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5557af77-f9d9-42b3-a2fd-71e0632bc567-catalog-content\") pod \"5557af77-f9d9-42b3-a2fd-71e0632bc567\" (UID: \"5557af77-f9d9-42b3-a2fd-71e0632bc567\") " Nov 28 10:46:08 crc kubenswrapper[4838]: I1128 10:46:08.583815 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnjhg\" (UniqueName: \"kubernetes.io/projected/5557af77-f9d9-42b3-a2fd-71e0632bc567-kube-api-access-gnjhg\") pod \"5557af77-f9d9-42b3-a2fd-71e0632bc567\" (UID: \"5557af77-f9d9-42b3-a2fd-71e0632bc567\") " Nov 28 10:46:08 crc kubenswrapper[4838]: I1128 10:46:08.584775 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5557af77-f9d9-42b3-a2fd-71e0632bc567-utilities" (OuterVolumeSpecName: "utilities") pod "5557af77-f9d9-42b3-a2fd-71e0632bc567" (UID: "5557af77-f9d9-42b3-a2fd-71e0632bc567"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:46:08 crc kubenswrapper[4838]: I1128 10:46:08.595031 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5557af77-f9d9-42b3-a2fd-71e0632bc567-kube-api-access-gnjhg" (OuterVolumeSpecName: "kube-api-access-gnjhg") pod "5557af77-f9d9-42b3-a2fd-71e0632bc567" (UID: "5557af77-f9d9-42b3-a2fd-71e0632bc567"). InnerVolumeSpecName "kube-api-access-gnjhg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:46:08 crc kubenswrapper[4838]: I1128 10:46:08.634839 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5557af77-f9d9-42b3-a2fd-71e0632bc567-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5557af77-f9d9-42b3-a2fd-71e0632bc567" (UID: "5557af77-f9d9-42b3-a2fd-71e0632bc567"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:46:08 crc kubenswrapper[4838]: I1128 10:46:08.686189 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5557af77-f9d9-42b3-a2fd-71e0632bc567-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:46:08 crc kubenswrapper[4838]: I1128 10:46:08.686238 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnjhg\" (UniqueName: \"kubernetes.io/projected/5557af77-f9d9-42b3-a2fd-71e0632bc567-kube-api-access-gnjhg\") on node \"crc\" DevicePath \"\"" Nov 28 10:46:08 crc kubenswrapper[4838]: I1128 10:46:08.686260 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5557af77-f9d9-42b3-a2fd-71e0632bc567-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:46:09 crc kubenswrapper[4838]: I1128 10:46:09.149274 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-pf2bq" Nov 28 10:46:09 crc kubenswrapper[4838]: I1128 10:46:09.149336 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-pf2bq" Nov 28 10:46:09 crc kubenswrapper[4838]: I1128 10:46:09.207175 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-pf2bq" Nov 28 10:46:09 crc kubenswrapper[4838]: I1128 10:46:09.445855 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6kktk" Nov 28 10:46:09 crc kubenswrapper[4838]: I1128 10:46:09.482131 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6kktk"] Nov 28 10:46:09 crc kubenswrapper[4838]: I1128 10:46:09.488589 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6kktk"] Nov 28 10:46:09 crc kubenswrapper[4838]: I1128 10:46:09.505998 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-pf2bq" Nov 28 10:46:10 crc kubenswrapper[4838]: I1128 10:46:10.584671 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5557af77-f9d9-42b3-a2fd-71e0632bc567" path="/var/lib/kubelet/pods/5557af77-f9d9-42b3-a2fd-71e0632bc567/volumes" Nov 28 10:46:11 crc kubenswrapper[4838]: I1128 10:46:11.603711 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pf2bq"] Nov 28 10:46:11 crc kubenswrapper[4838]: I1128 10:46:11.603983 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-pf2bq" podUID="3908b5d1-56f5-4fb0-ab2b-10bf81e10642" containerName="registry-server" containerID="cri-o://70ba19c769109a84b482e6fd81e6e764d7579c8f392797b2c384cb127ad7e9f3" gracePeriod=2 Nov 28 10:46:11 crc kubenswrapper[4838]: E1128 10:46:11.764211 4838 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3908b5d1_56f5_4fb0_ab2b_10bf81e10642.slice/crio-70ba19c769109a84b482e6fd81e6e764d7579c8f392797b2c384cb127ad7e9f3.scope\": RecentStats: unable to find data in memory cache]" Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.071942 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pf2bq" Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.159094 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vnks4\" (UniqueName: \"kubernetes.io/projected/3908b5d1-56f5-4fb0-ab2b-10bf81e10642-kube-api-access-vnks4\") pod \"3908b5d1-56f5-4fb0-ab2b-10bf81e10642\" (UID: \"3908b5d1-56f5-4fb0-ab2b-10bf81e10642\") " Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.159298 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3908b5d1-56f5-4fb0-ab2b-10bf81e10642-catalog-content\") pod \"3908b5d1-56f5-4fb0-ab2b-10bf81e10642\" (UID: \"3908b5d1-56f5-4fb0-ab2b-10bf81e10642\") " Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.159791 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3908b5d1-56f5-4fb0-ab2b-10bf81e10642-utilities\") pod \"3908b5d1-56f5-4fb0-ab2b-10bf81e10642\" (UID: \"3908b5d1-56f5-4fb0-ab2b-10bf81e10642\") " Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.161163 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3908b5d1-56f5-4fb0-ab2b-10bf81e10642-utilities" (OuterVolumeSpecName: "utilities") pod "3908b5d1-56f5-4fb0-ab2b-10bf81e10642" (UID: "3908b5d1-56f5-4fb0-ab2b-10bf81e10642"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.164564 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3908b5d1-56f5-4fb0-ab2b-10bf81e10642-kube-api-access-vnks4" (OuterVolumeSpecName: "kube-api-access-vnks4") pod "3908b5d1-56f5-4fb0-ab2b-10bf81e10642" (UID: "3908b5d1-56f5-4fb0-ab2b-10bf81e10642"). InnerVolumeSpecName "kube-api-access-vnks4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.262263 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3908b5d1-56f5-4fb0-ab2b-10bf81e10642-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.262313 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vnks4\" (UniqueName: \"kubernetes.io/projected/3908b5d1-56f5-4fb0-ab2b-10bf81e10642-kube-api-access-vnks4\") on node \"crc\" DevicePath \"\"" Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.484389 4838 generic.go:334] "Generic (PLEG): container finished" podID="3908b5d1-56f5-4fb0-ab2b-10bf81e10642" containerID="70ba19c769109a84b482e6fd81e6e764d7579c8f392797b2c384cb127ad7e9f3" exitCode=0 Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.484451 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pf2bq" event={"ID":"3908b5d1-56f5-4fb0-ab2b-10bf81e10642","Type":"ContainerDied","Data":"70ba19c769109a84b482e6fd81e6e764d7579c8f392797b2c384cb127ad7e9f3"} Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.484491 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pf2bq" event={"ID":"3908b5d1-56f5-4fb0-ab2b-10bf81e10642","Type":"ContainerDied","Data":"76211d6c9919488a85b83cc835a0c9f0421b7b9b2c6df672f2030eddfa897a1b"} Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.484528 4838 scope.go:117] "RemoveContainer" containerID="70ba19c769109a84b482e6fd81e6e764d7579c8f392797b2c384cb127ad7e9f3" Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.484542 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pf2bq" Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.515639 4838 scope.go:117] "RemoveContainer" containerID="63492924efdd11885dc90b498f9fcecbb097265fb6a62ce2024b79895d2a4a15" Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.543883 4838 scope.go:117] "RemoveContainer" containerID="bcb35937263469bd16dd54b716c015c3a04674446b333020ed6953c40ddd778f" Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.597836 4838 scope.go:117] "RemoveContainer" containerID="70ba19c769109a84b482e6fd81e6e764d7579c8f392797b2c384cb127ad7e9f3" Nov 28 10:46:12 crc kubenswrapper[4838]: E1128 10:46:12.598289 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70ba19c769109a84b482e6fd81e6e764d7579c8f392797b2c384cb127ad7e9f3\": container with ID starting with 70ba19c769109a84b482e6fd81e6e764d7579c8f392797b2c384cb127ad7e9f3 not found: ID does not exist" containerID="70ba19c769109a84b482e6fd81e6e764d7579c8f392797b2c384cb127ad7e9f3" Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.598320 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70ba19c769109a84b482e6fd81e6e764d7579c8f392797b2c384cb127ad7e9f3"} err="failed to get container status \"70ba19c769109a84b482e6fd81e6e764d7579c8f392797b2c384cb127ad7e9f3\": rpc error: code = NotFound desc = could not find container \"70ba19c769109a84b482e6fd81e6e764d7579c8f392797b2c384cb127ad7e9f3\": container with ID starting with 70ba19c769109a84b482e6fd81e6e764d7579c8f392797b2c384cb127ad7e9f3 not found: ID does not exist" Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.598341 4838 scope.go:117] "RemoveContainer" containerID="63492924efdd11885dc90b498f9fcecbb097265fb6a62ce2024b79895d2a4a15" Nov 28 10:46:12 crc kubenswrapper[4838]: E1128 10:46:12.598852 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63492924efdd11885dc90b498f9fcecbb097265fb6a62ce2024b79895d2a4a15\": container with ID starting with 63492924efdd11885dc90b498f9fcecbb097265fb6a62ce2024b79895d2a4a15 not found: ID does not exist" containerID="63492924efdd11885dc90b498f9fcecbb097265fb6a62ce2024b79895d2a4a15" Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.599152 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63492924efdd11885dc90b498f9fcecbb097265fb6a62ce2024b79895d2a4a15"} err="failed to get container status \"63492924efdd11885dc90b498f9fcecbb097265fb6a62ce2024b79895d2a4a15\": rpc error: code = NotFound desc = could not find container \"63492924efdd11885dc90b498f9fcecbb097265fb6a62ce2024b79895d2a4a15\": container with ID starting with 63492924efdd11885dc90b498f9fcecbb097265fb6a62ce2024b79895d2a4a15 not found: ID does not exist" Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.599393 4838 scope.go:117] "RemoveContainer" containerID="bcb35937263469bd16dd54b716c015c3a04674446b333020ed6953c40ddd778f" Nov 28 10:46:12 crc kubenswrapper[4838]: E1128 10:46:12.600099 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bcb35937263469bd16dd54b716c015c3a04674446b333020ed6953c40ddd778f\": container with ID starting with bcb35937263469bd16dd54b716c015c3a04674446b333020ed6953c40ddd778f not found: ID does not exist" containerID="bcb35937263469bd16dd54b716c015c3a04674446b333020ed6953c40ddd778f" 
Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.600128 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bcb35937263469bd16dd54b716c015c3a04674446b333020ed6953c40ddd778f"} err="failed to get container status \"bcb35937263469bd16dd54b716c015c3a04674446b333020ed6953c40ddd778f\": rpc error: code = NotFound desc = could not find container \"bcb35937263469bd16dd54b716c015c3a04674446b333020ed6953c40ddd778f\": container with ID starting with bcb35937263469bd16dd54b716c015c3a04674446b333020ed6953c40ddd778f not found: ID does not exist" Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.673391 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3908b5d1-56f5-4fb0-ab2b-10bf81e10642-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3908b5d1-56f5-4fb0-ab2b-10bf81e10642" (UID: "3908b5d1-56f5-4fb0-ab2b-10bf81e10642"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.774621 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3908b5d1-56f5-4fb0-ab2b-10bf81e10642-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.832118 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pf2bq"] Nov 28 10:46:12 crc kubenswrapper[4838]: I1128 10:46:12.843544 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-pf2bq"] Nov 28 10:46:14 crc kubenswrapper[4838]: I1128 10:46:14.580569 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3908b5d1-56f5-4fb0-ab2b-10bf81e10642" path="/var/lib/kubelet/pods/3908b5d1-56f5-4fb0-ab2b-10bf81e10642/volumes" Nov 28 10:46:23 crc kubenswrapper[4838]: I1128 10:46:23.940473 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:46:23 crc kubenswrapper[4838]: I1128 10:46:23.941304 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:46:23 crc kubenswrapper[4838]: I1128 10:46:23.941381 4838 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 10:46:23 crc kubenswrapper[4838]: I1128 10:46:23.942406 4838 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c3bd2f09ef29e69880ca8ba24161da6d816f538d277f4ad3c4e163e96b582c98"} pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 10:46:23 crc kubenswrapper[4838]: I1128 10:46:23.942540 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" 
containerName="machine-config-daemon" containerID="cri-o://c3bd2f09ef29e69880ca8ba24161da6d816f538d277f4ad3c4e163e96b582c98" gracePeriod=600 Nov 28 10:46:24 crc kubenswrapper[4838]: I1128 10:46:24.613232 4838 generic.go:334] "Generic (PLEG): container finished" podID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerID="c3bd2f09ef29e69880ca8ba24161da6d816f538d277f4ad3c4e163e96b582c98" exitCode=0 Nov 28 10:46:24 crc kubenswrapper[4838]: I1128 10:46:24.613255 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerDied","Data":"c3bd2f09ef29e69880ca8ba24161da6d816f538d277f4ad3c4e163e96b582c98"} Nov 28 10:46:24 crc kubenswrapper[4838]: I1128 10:46:24.613833 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerStarted","Data":"3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b"} Nov 28 10:46:24 crc kubenswrapper[4838]: I1128 10:46:24.613856 4838 scope.go:117] "RemoveContainer" containerID="eb7c3a5fb0c9835207f14cd442a9c164c2c7763a8bc95f818d99864ca09179f7" Nov 28 10:47:57 crc kubenswrapper[4838]: I1128 10:47:57.667135 4838 generic.go:334] "Generic (PLEG): container finished" podID="d861b650-a017-43fc-8da3-b65d8f9e8ce8" containerID="594517f1ab7c7c66ad576c9682b5e48c1e2a57fad013ad72bc60205349f7f88d" exitCode=0 Nov 28 10:47:57 crc kubenswrapper[4838]: I1128 10:47:57.667191 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" event={"ID":"d861b650-a017-43fc-8da3-b65d8f9e8ce8","Type":"ContainerDied","Data":"594517f1ab7c7c66ad576c9682b5e48c1e2a57fad013ad72bc60205349f7f88d"} Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.211022 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.293476 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-ceph\") pod \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.293663 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-libvirt-secret-0\") pod \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.293756 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kr42l\" (UniqueName: \"kubernetes.io/projected/d861b650-a017-43fc-8da3-b65d8f9e8ce8-kube-api-access-kr42l\") pod \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.293824 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-libvirt-combined-ca-bundle\") pod \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.293861 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-inventory\") pod \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.293933 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-ssh-key\") pod \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\" (UID: \"d861b650-a017-43fc-8da3-b65d8f9e8ce8\") " Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.306001 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-ceph" (OuterVolumeSpecName: "ceph") pod "d861b650-a017-43fc-8da3-b65d8f9e8ce8" (UID: "d861b650-a017-43fc-8da3-b65d8f9e8ce8"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.306079 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d861b650-a017-43fc-8da3-b65d8f9e8ce8-kube-api-access-kr42l" (OuterVolumeSpecName: "kube-api-access-kr42l") pod "d861b650-a017-43fc-8da3-b65d8f9e8ce8" (UID: "d861b650-a017-43fc-8da3-b65d8f9e8ce8"). InnerVolumeSpecName "kube-api-access-kr42l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.308975 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "d861b650-a017-43fc-8da3-b65d8f9e8ce8" (UID: "d861b650-a017-43fc-8da3-b65d8f9e8ce8"). InnerVolumeSpecName "libvirt-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.330534 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d861b650-a017-43fc-8da3-b65d8f9e8ce8" (UID: "d861b650-a017-43fc-8da3-b65d8f9e8ce8"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.348684 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-inventory" (OuterVolumeSpecName: "inventory") pod "d861b650-a017-43fc-8da3-b65d8f9e8ce8" (UID: "d861b650-a017-43fc-8da3-b65d8f9e8ce8"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.352215 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "d861b650-a017-43fc-8da3-b65d8f9e8ce8" (UID: "d861b650-a017-43fc-8da3-b65d8f9e8ce8"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.397600 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.397675 4838 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.397709 4838 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.397780 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kr42l\" (UniqueName: \"kubernetes.io/projected/d861b650-a017-43fc-8da3-b65d8f9e8ce8-kube-api-access-kr42l\") on node \"crc\" DevicePath \"\"" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.397808 4838 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.397831 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d861b650-a017-43fc-8da3-b65d8f9e8ce8-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.690909 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" event={"ID":"d861b650-a017-43fc-8da3-b65d8f9e8ce8","Type":"ContainerDied","Data":"e95d01f1e810b4fc99a2682971c4bce11be8d60569532b0dbe3a6836f53c3896"} Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.690968 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e95d01f1e810b4fc99a2682971c4bce11be8d60569532b0dbe3a6836f53c3896" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.691020 4838 util.go:48] 
"No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8958p" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.871667 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x"] Nov 28 10:47:59 crc kubenswrapper[4838]: E1128 10:47:59.872156 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3908b5d1-56f5-4fb0-ab2b-10bf81e10642" containerName="extract-utilities" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.872179 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="3908b5d1-56f5-4fb0-ab2b-10bf81e10642" containerName="extract-utilities" Nov 28 10:47:59 crc kubenswrapper[4838]: E1128 10:47:59.872199 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3908b5d1-56f5-4fb0-ab2b-10bf81e10642" containerName="registry-server" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.872208 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="3908b5d1-56f5-4fb0-ab2b-10bf81e10642" containerName="registry-server" Nov 28 10:47:59 crc kubenswrapper[4838]: E1128 10:47:59.872222 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5557af77-f9d9-42b3-a2fd-71e0632bc567" containerName="extract-content" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.872231 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="5557af77-f9d9-42b3-a2fd-71e0632bc567" containerName="extract-content" Nov 28 10:47:59 crc kubenswrapper[4838]: E1128 10:47:59.872247 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d861b650-a017-43fc-8da3-b65d8f9e8ce8" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.872256 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="d861b650-a017-43fc-8da3-b65d8f9e8ce8" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 28 10:47:59 crc kubenswrapper[4838]: E1128 10:47:59.872298 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5557af77-f9d9-42b3-a2fd-71e0632bc567" containerName="registry-server" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.872307 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="5557af77-f9d9-42b3-a2fd-71e0632bc567" containerName="registry-server" Nov 28 10:47:59 crc kubenswrapper[4838]: E1128 10:47:59.872319 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5557af77-f9d9-42b3-a2fd-71e0632bc567" containerName="extract-utilities" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.872329 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="5557af77-f9d9-42b3-a2fd-71e0632bc567" containerName="extract-utilities" Nov 28 10:47:59 crc kubenswrapper[4838]: E1128 10:47:59.872342 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3908b5d1-56f5-4fb0-ab2b-10bf81e10642" containerName="extract-content" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.872351 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="3908b5d1-56f5-4fb0-ab2b-10bf81e10642" containerName="extract-content" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.872581 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="3908b5d1-56f5-4fb0-ab2b-10bf81e10642" containerName="registry-server" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.872601 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="5557af77-f9d9-42b3-a2fd-71e0632bc567" 
containerName="registry-server" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.872621 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="d861b650-a017-43fc-8da3-b65d8f9e8ce8" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.873411 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.879179 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ceph-nova" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.879871 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.883311 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.883394 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.883453 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.883556 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.883764 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.886424 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.886658 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-t6dwn" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.888801 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x"] Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.921874 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.921952 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.921987 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: 
\"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.922047 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9x8bq\" (UniqueName: \"kubernetes.io/projected/54e4c0ee-74da-434c-bb61-702d4e78c663-kube-api-access-9x8bq\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.922116 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.922217 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.922309 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.922364 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.922410 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/54e4c0ee-74da-434c-bb61-702d4e78c663-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.922456 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:47:59 crc kubenswrapper[4838]: I1128 10:47:59.922515 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.024596 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/54e4c0ee-74da-434c-bb61-702d4e78c663-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.024659 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.024728 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.024794 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.024830 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.024858 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.024897 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9x8bq\" (UniqueName: \"kubernetes.io/projected/54e4c0ee-74da-434c-bb61-702d4e78c663-kube-api-access-9x8bq\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " 
pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.024943 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.024978 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.025018 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.025073 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.026380 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/54e4c0ee-74da-434c-bb61-702d4e78c663-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.026488 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.029191 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.030798 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-custom-ceph-combined-ca-bundle\") pod 
\"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.032750 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.033184 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.033503 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.033609 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.033659 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.034125 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.043764 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9x8bq\" (UniqueName: \"kubernetes.io/projected/54e4c0ee-74da-434c-bb61-702d4e78c663-kube-api-access-9x8bq\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.196207 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.761953 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x"] Nov 28 10:48:00 crc kubenswrapper[4838]: I1128 10:48:00.779130 4838 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 10:48:01 crc kubenswrapper[4838]: I1128 10:48:01.708707 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" event={"ID":"54e4c0ee-74da-434c-bb61-702d4e78c663","Type":"ContainerStarted","Data":"8c3fa5a48f231e10702e87e08358a95535ab4301c9a66fe81881666f218ab8c7"} Nov 28 10:48:02 crc kubenswrapper[4838]: I1128 10:48:02.719900 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" event={"ID":"54e4c0ee-74da-434c-bb61-702d4e78c663","Type":"ContainerStarted","Data":"ccfcbd4e380e6bb6014ff20bab9963c8e41525dd29398e3b551f02604f10a23e"} Nov 28 10:48:02 crc kubenswrapper[4838]: I1128 10:48:02.751667 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" podStartSLOduration=3.001451532 podStartE2EDuration="3.751636717s" podCreationTimestamp="2025-11-28 10:47:59 +0000 UTC" firstStartedPulling="2025-11-28 10:48:00.778706424 +0000 UTC m=+3052.477680624" lastFinishedPulling="2025-11-28 10:48:01.528891639 +0000 UTC m=+3053.227865809" observedRunningTime="2025-11-28 10:48:02.744446321 +0000 UTC m=+3054.443420521" watchObservedRunningTime="2025-11-28 10:48:02.751636717 +0000 UTC m=+3054.450610927" Nov 28 10:48:53 crc kubenswrapper[4838]: I1128 10:48:53.940612 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:48:53 crc kubenswrapper[4838]: I1128 10:48:53.941245 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:49:23 crc kubenswrapper[4838]: I1128 10:49:23.940087 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:49:23 crc kubenswrapper[4838]: I1128 10:49:23.940781 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:49:53 crc kubenswrapper[4838]: I1128 10:49:53.940864 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:49:53 crc kubenswrapper[4838]: I1128 10:49:53.941787 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:49:53 crc kubenswrapper[4838]: I1128 10:49:53.941863 4838 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 10:49:53 crc kubenswrapper[4838]: I1128 10:49:53.943214 4838 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b"} pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 10:49:53 crc kubenswrapper[4838]: I1128 10:49:53.943330 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" containerID="cri-o://3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" gracePeriod=600 Nov 28 10:49:54 crc kubenswrapper[4838]: E1128 10:49:54.074498 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:49:55 crc kubenswrapper[4838]: I1128 10:49:55.086674 4838 generic.go:334] "Generic (PLEG): container finished" podID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" exitCode=0 Nov 28 10:49:55 crc kubenswrapper[4838]: I1128 10:49:55.086786 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerDied","Data":"3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b"} Nov 28 10:49:55 crc kubenswrapper[4838]: I1128 10:49:55.087132 4838 scope.go:117] "RemoveContainer" containerID="c3bd2f09ef29e69880ca8ba24161da6d816f538d277f4ad3c4e163e96b582c98" Nov 28 10:49:55 crc kubenswrapper[4838]: I1128 10:49:55.089117 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:49:55 crc kubenswrapper[4838]: E1128 10:49:55.090517 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:50:06 crc kubenswrapper[4838]: I1128 10:50:06.563101 4838 scope.go:117] 
"RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:50:06 crc kubenswrapper[4838]: E1128 10:50:06.564083 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:50:19 crc kubenswrapper[4838]: I1128 10:50:19.562573 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:50:19 crc kubenswrapper[4838]: E1128 10:50:19.563929 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:50:34 crc kubenswrapper[4838]: I1128 10:50:34.572578 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:50:34 crc kubenswrapper[4838]: E1128 10:50:34.573655 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:50:45 crc kubenswrapper[4838]: I1128 10:50:45.562472 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:50:45 crc kubenswrapper[4838]: E1128 10:50:45.563223 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:50:57 crc kubenswrapper[4838]: I1128 10:50:57.562186 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:50:57 crc kubenswrapper[4838]: E1128 10:50:57.563115 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:51:08 crc kubenswrapper[4838]: I1128 10:51:08.581270 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:51:08 crc kubenswrapper[4838]: E1128 10:51:08.584590 4838 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:51:22 crc kubenswrapper[4838]: I1128 10:51:22.562752 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:51:22 crc kubenswrapper[4838]: E1128 10:51:22.563526 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:51:34 crc kubenswrapper[4838]: I1128 10:51:34.562531 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:51:34 crc kubenswrapper[4838]: E1128 10:51:34.563672 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:51:42 crc kubenswrapper[4838]: I1128 10:51:42.297046 4838 generic.go:334] "Generic (PLEG): container finished" podID="54e4c0ee-74da-434c-bb61-702d4e78c663" containerID="ccfcbd4e380e6bb6014ff20bab9963c8e41525dd29398e3b551f02604f10a23e" exitCode=0 Nov 28 10:51:42 crc kubenswrapper[4838]: I1128 10:51:42.297173 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" event={"ID":"54e4c0ee-74da-434c-bb61-702d4e78c663","Type":"ContainerDied","Data":"ccfcbd4e380e6bb6014ff20bab9963c8e41525dd29398e3b551f02604f10a23e"} Nov 28 10:51:43 crc kubenswrapper[4838]: I1128 10:51:43.784626 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:51:43 crc kubenswrapper[4838]: I1128 10:51:43.964540 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-cell1-compute-config-1\") pod \"54e4c0ee-74da-434c-bb61-702d4e78c663\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " Nov 28 10:51:43 crc kubenswrapper[4838]: I1128 10:51:43.964622 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-inventory\") pod \"54e4c0ee-74da-434c-bb61-702d4e78c663\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " Nov 28 10:51:43 crc kubenswrapper[4838]: I1128 10:51:43.964805 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-cell1-compute-config-0\") pod \"54e4c0ee-74da-434c-bb61-702d4e78c663\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " Nov 28 10:51:43 crc kubenswrapper[4838]: I1128 10:51:43.964971 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-extra-config-0\") pod \"54e4c0ee-74da-434c-bb61-702d4e78c663\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " Nov 28 10:51:43 crc kubenswrapper[4838]: I1128 10:51:43.965006 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-migration-ssh-key-1\") pod \"54e4c0ee-74da-434c-bb61-702d4e78c663\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " Nov 28 10:51:43 crc kubenswrapper[4838]: I1128 10:51:43.965053 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-ceph\") pod \"54e4c0ee-74da-434c-bb61-702d4e78c663\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " Nov 28 10:51:43 crc kubenswrapper[4838]: I1128 10:51:43.965087 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9x8bq\" (UniqueName: \"kubernetes.io/projected/54e4c0ee-74da-434c-bb61-702d4e78c663-kube-api-access-9x8bq\") pod \"54e4c0ee-74da-434c-bb61-702d4e78c663\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " Nov 28 10:51:43 crc kubenswrapper[4838]: I1128 10:51:43.965120 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/54e4c0ee-74da-434c-bb61-702d4e78c663-ceph-nova-0\") pod \"54e4c0ee-74da-434c-bb61-702d4e78c663\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " Nov 28 10:51:43 crc kubenswrapper[4838]: I1128 10:51:43.965249 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-ssh-key\") pod \"54e4c0ee-74da-434c-bb61-702d4e78c663\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " Nov 28 10:51:43 crc kubenswrapper[4838]: I1128 10:51:43.965313 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-custom-ceph-combined-ca-bundle\") pod \"54e4c0ee-74da-434c-bb61-702d4e78c663\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " Nov 28 10:51:43 crc kubenswrapper[4838]: I1128 10:51:43.965355 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-migration-ssh-key-0\") pod \"54e4c0ee-74da-434c-bb61-702d4e78c663\" (UID: \"54e4c0ee-74da-434c-bb61-702d4e78c663\") " Nov 28 10:51:43 crc kubenswrapper[4838]: I1128 10:51:43.971059 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-ceph" (OuterVolumeSpecName: "ceph") pod "54e4c0ee-74da-434c-bb61-702d4e78c663" (UID: "54e4c0ee-74da-434c-bb61-702d4e78c663"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:51:43 crc kubenswrapper[4838]: I1128 10:51:43.972480 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54e4c0ee-74da-434c-bb61-702d4e78c663-kube-api-access-9x8bq" (OuterVolumeSpecName: "kube-api-access-9x8bq") pod "54e4c0ee-74da-434c-bb61-702d4e78c663" (UID: "54e4c0ee-74da-434c-bb61-702d4e78c663"). InnerVolumeSpecName "kube-api-access-9x8bq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:51:43 crc kubenswrapper[4838]: I1128 10:51:43.974608 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-custom-ceph-combined-ca-bundle" (OuterVolumeSpecName: "nova-custom-ceph-combined-ca-bundle") pod "54e4c0ee-74da-434c-bb61-702d4e78c663" (UID: "54e4c0ee-74da-434c-bb61-702d4e78c663"). InnerVolumeSpecName "nova-custom-ceph-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:51:43 crc kubenswrapper[4838]: I1128 10:51:43.993624 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "54e4c0ee-74da-434c-bb61-702d4e78c663" (UID: "54e4c0ee-74da-434c-bb61-702d4e78c663"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:51:43 crc kubenswrapper[4838]: I1128 10:51:43.998461 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "54e4c0ee-74da-434c-bb61-702d4e78c663" (UID: "54e4c0ee-74da-434c-bb61-702d4e78c663"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:51:44 crc kubenswrapper[4838]: I1128 10:51:44.007815 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54e4c0ee-74da-434c-bb61-702d4e78c663-ceph-nova-0" (OuterVolumeSpecName: "ceph-nova-0") pod "54e4c0ee-74da-434c-bb61-702d4e78c663" (UID: "54e4c0ee-74da-434c-bb61-702d4e78c663"). InnerVolumeSpecName "ceph-nova-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:51:44 crc kubenswrapper[4838]: I1128 10:51:44.014296 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "54e4c0ee-74da-434c-bb61-702d4e78c663" (UID: "54e4c0ee-74da-434c-bb61-702d4e78c663"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:51:44 crc kubenswrapper[4838]: I1128 10:51:44.020824 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-inventory" (OuterVolumeSpecName: "inventory") pod "54e4c0ee-74da-434c-bb61-702d4e78c663" (UID: "54e4c0ee-74da-434c-bb61-702d4e78c663"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:51:44 crc kubenswrapper[4838]: I1128 10:51:44.021798 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "54e4c0ee-74da-434c-bb61-702d4e78c663" (UID: "54e4c0ee-74da-434c-bb61-702d4e78c663"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:51:44 crc kubenswrapper[4838]: I1128 10:51:44.022032 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "54e4c0ee-74da-434c-bb61-702d4e78c663" (UID: "54e4c0ee-74da-434c-bb61-702d4e78c663"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:51:44 crc kubenswrapper[4838]: I1128 10:51:44.027015 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "54e4c0ee-74da-434c-bb61-702d4e78c663" (UID: "54e4c0ee-74da-434c-bb61-702d4e78c663"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:51:44 crc kubenswrapper[4838]: I1128 10:51:44.084452 4838 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 10:51:44 crc kubenswrapper[4838]: I1128 10:51:44.084504 4838 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 10:51:44 crc kubenswrapper[4838]: I1128 10:51:44.084526 4838 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 28 10:51:44 crc kubenswrapper[4838]: I1128 10:51:44.084545 4838 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 10:51:44 crc kubenswrapper[4838]: I1128 10:51:44.084563 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9x8bq\" (UniqueName: \"kubernetes.io/projected/54e4c0ee-74da-434c-bb61-702d4e78c663-kube-api-access-9x8bq\") on node \"crc\" DevicePath \"\"" Nov 28 10:51:44 crc kubenswrapper[4838]: I1128 10:51:44.084580 4838 reconciler_common.go:293] "Volume detached for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/54e4c0ee-74da-434c-bb61-702d4e78c663-ceph-nova-0\") on node \"crc\" DevicePath \"\"" Nov 28 10:51:44 crc kubenswrapper[4838]: I1128 10:51:44.084596 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:51:44 crc kubenswrapper[4838]: I1128 10:51:44.084613 4838 reconciler_common.go:293] "Volume detached for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-custom-ceph-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:51:44 crc kubenswrapper[4838]: I1128 10:51:44.084634 4838 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 28 10:51:44 crc kubenswrapper[4838]: I1128 10:51:44.084651 4838 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 28 10:51:44 crc kubenswrapper[4838]: I1128 10:51:44.084669 4838 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54e4c0ee-74da-434c-bb61-702d4e78c663-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 10:51:44 crc kubenswrapper[4838]: I1128 10:51:44.324909 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" event={"ID":"54e4c0ee-74da-434c-bb61-702d4e78c663","Type":"ContainerDied","Data":"8c3fa5a48f231e10702e87e08358a95535ab4301c9a66fe81881666f218ab8c7"} Nov 28 10:51:44 crc kubenswrapper[4838]: I1128 10:51:44.324962 4838 pod_container_deletor.go:80] "Container not found in pod's 
containers" containerID="8c3fa5a48f231e10702e87e08358a95535ab4301c9a66fe81881666f218ab8c7" Nov 28 10:51:44 crc kubenswrapper[4838]: I1128 10:51:44.325053 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x" Nov 28 10:51:49 crc kubenswrapper[4838]: I1128 10:51:49.563424 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:51:49 crc kubenswrapper[4838]: E1128 10:51:49.564772 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:51:58 crc kubenswrapper[4838]: I1128 10:51:58.584960 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 28 10:52:00 crc kubenswrapper[4838]: E1128 10:51:58.591217 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54e4c0ee-74da-434c-bb61-702d4e78c663" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.591239 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="54e4c0ee-74da-434c-bb61-702d4e78c663" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.591459 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="54e4c0ee-74da-434c-bb61-702d4e78c663" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.592410 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.594863 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.595243 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-volume1-config-data" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.597672 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-backup-0"] Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.599203 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.600655 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-backup-config-data" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.609464 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.628468 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.705608 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.705648 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5efcb033-775b-46d6-8c77-2bafc360c749-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.705676 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-run\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.705710 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-sys\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.706822 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.706867 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8525f0f2-643f-4177-a4f8-12ca22b43363-config-data-custom\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.706900 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5efcb033-775b-46d6-8c77-2bafc360c749-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.706981 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5efcb033-775b-46d6-8c77-2bafc360c749-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: 
\"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707038 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707058 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707088 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-etc-nvme\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707110 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707151 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707171 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-dev\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707223 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-sys\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707246 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707287 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc 
kubenswrapper[4838]: I1128 10:51:58.707312 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8525f0f2-643f-4177-a4f8-12ca22b43363-scripts\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707351 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8525f0f2-643f-4177-a4f8-12ca22b43363-ceph\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707385 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707410 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2gmz\" (UniqueName: \"kubernetes.io/projected/5efcb033-775b-46d6-8c77-2bafc360c749-kube-api-access-t2gmz\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707464 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5efcb033-775b-46d6-8c77-2bafc360c749-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707491 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8525f0f2-643f-4177-a4f8-12ca22b43363-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707513 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707533 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-lib-modules\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707560 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-dev\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707579 4838 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5efcb033-775b-46d6-8c77-2bafc360c749-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707605 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707640 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8525f0f2-643f-4177-a4f8-12ca22b43363-config-data\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707705 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707755 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-run\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.707791 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lbzqn\" (UniqueName: \"kubernetes.io/projected/8525f0f2-643f-4177-a4f8-12ca22b43363-kube-api-access-lbzqn\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809153 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809197 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5efcb033-775b-46d6-8c77-2bafc360c749-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809225 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-run\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809246 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-sys\") pod 
\"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809268 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809286 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8525f0f2-643f-4177-a4f8-12ca22b43363-config-data-custom\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809307 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5efcb033-775b-46d6-8c77-2bafc360c749-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809323 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-run\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809337 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5efcb033-775b-46d6-8c77-2bafc360c749-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809367 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-sys\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809322 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809444 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809413 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809482 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809526 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-etc-nvme\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809544 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809585 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809603 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-dev\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809661 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-sys\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809678 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809764 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809796 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8525f0f2-643f-4177-a4f8-12ca22b43363-scripts\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809830 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8525f0f2-643f-4177-a4f8-12ca22b43363-ceph\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809855 4838 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809876 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2gmz\" (UniqueName: \"kubernetes.io/projected/5efcb033-775b-46d6-8c77-2bafc360c749-kube-api-access-t2gmz\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809937 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5efcb033-775b-46d6-8c77-2bafc360c749-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809960 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8525f0f2-643f-4177-a4f8-12ca22b43363-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809975 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.809995 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-lib-modules\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.810014 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-dev\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.810028 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5efcb033-775b-46d6-8c77-2bafc360c749-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.810047 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.810078 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8525f0f2-643f-4177-a4f8-12ca22b43363-config-data\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " 
pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.810101 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.810126 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-run\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.810158 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lbzqn\" (UniqueName: \"kubernetes.io/projected/8525f0f2-643f-4177-a4f8-12ca22b43363-kube-api-access-lbzqn\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.810496 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.810577 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.810654 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-etc-nvme\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.810688 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.810813 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.810847 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-dev\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.810878 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-sys\") pod \"cinder-volume-volume1-0\" (UID: 
\"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.810920 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.810952 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.816137 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5efcb033-775b-46d6-8c77-2bafc360c749-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.816275 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.817028 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5efcb033-775b-46d6-8c77-2bafc360c749-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.819404 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5efcb033-775b-46d6-8c77-2bafc360c749-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.819487 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.819514 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-lib-modules\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.819548 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/5efcb033-775b-46d6-8c77-2bafc360c749-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.819575 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-run\") pod 
\"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.819608 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.819631 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/8525f0f2-643f-4177-a4f8-12ca22b43363-dev\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.820337 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8525f0f2-643f-4177-a4f8-12ca22b43363-config-data-custom\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.822401 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8525f0f2-643f-4177-a4f8-12ca22b43363-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.823111 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8525f0f2-643f-4177-a4f8-12ca22b43363-ceph\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.825246 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5efcb033-775b-46d6-8c77-2bafc360c749-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.825313 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8525f0f2-643f-4177-a4f8-12ca22b43363-scripts\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.827326 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8525f0f2-643f-4177-a4f8-12ca22b43363-config-data\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.833747 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2gmz\" (UniqueName: \"kubernetes.io/projected/5efcb033-775b-46d6-8c77-2bafc360c749-kube-api-access-t2gmz\") pod \"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.835369 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5efcb033-775b-46d6-8c77-2bafc360c749-scripts\") pod 
\"cinder-volume-volume1-0\" (UID: \"5efcb033-775b-46d6-8c77-2bafc360c749\") " pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.839636 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lbzqn\" (UniqueName: \"kubernetes.io/projected/8525f0f2-643f-4177-a4f8-12ca22b43363-kube-api-access-lbzqn\") pod \"cinder-backup-0\" (UID: \"8525f0f2-643f-4177-a4f8-12ca22b43363\") " pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.926601 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:58.948267 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.469500 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.471596 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.473988 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.474550 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.474814 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.476975 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-d7h7r" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.506571 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.590597 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.592141 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.598480 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.599330 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.624476 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.689686 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-scripts\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.689826 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/80add234-36e8-4db8-a857-3bc8fcb0904a-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.689876 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.689910 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-config-data\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.689941 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/80add234-36e8-4db8-a857-3bc8fcb0904a-logs\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.689957 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.689975 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.690002 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ceph\" (UniqueName: \"kubernetes.io/projected/80add234-36e8-4db8-a857-3bc8fcb0904a-ceph\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.690030 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2hr5m\" (UniqueName: \"kubernetes.io/projected/80add234-36e8-4db8-a857-3bc8fcb0904a-kube-api-access-2hr5m\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.725564 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7c95bbbd67-4qj7h"] Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.742634 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7c95bbbd67-4qj7h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.744991 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-m2m7x" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.745566 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.745733 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.748949 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.775698 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7c95bbbd67-4qj7h"] Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.792155 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2hr5m\" (UniqueName: \"kubernetes.io/projected/80add234-36e8-4db8-a857-3bc8fcb0904a-kube-api-access-2hr5m\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.792222 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/96e3bfa8-39a8-40a8-80c0-60d8af991c75-ceph\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.792251 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.792278 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-scripts\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.792313 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87gdh\" 
(UniqueName: \"kubernetes.io/projected/96e3bfa8-39a8-40a8-80c0-60d8af991c75-kube-api-access-87gdh\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.792338 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.792362 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/80add234-36e8-4db8-a857-3bc8fcb0904a-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.792404 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.792431 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/96e3bfa8-39a8-40a8-80c0-60d8af991c75-logs\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.792454 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.792476 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-config-data\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.792497 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/96e3bfa8-39a8-40a8-80c0-60d8af991c75-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.792517 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-config-data\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.792552 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/80add234-36e8-4db8-a857-3bc8fcb0904a-logs\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.792570 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.792593 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.792619 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-scripts\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.792640 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/80add234-36e8-4db8-a857-3bc8fcb0904a-ceph\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.796488 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 10:52:00 crc kubenswrapper[4838]: E1128 10:51:59.797821 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ceph combined-ca-bundle config-data glance httpd-run kube-api-access-2hr5m logs public-tls-certs scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/glance-default-external-api-0" podUID="80add234-36e8-4db8-a857-3bc8fcb0904a" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.808195 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/80add234-36e8-4db8-a857-3bc8fcb0904a-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.808544 4838 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.811937 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/80add234-36e8-4db8-a857-3bc8fcb0904a-logs\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.817417 4838 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.820641 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/80add234-36e8-4db8-a857-3bc8fcb0904a-ceph\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.821277 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-config-data\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.834272 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-scripts\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.834840 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.845593 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2hr5m\" (UniqueName: \"kubernetes.io/projected/80add234-36e8-4db8-a857-3bc8fcb0904a-kube-api-access-2hr5m\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.847401 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 10:52:00 crc kubenswrapper[4838]: E1128 10:51:59.848208 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ceph combined-ca-bundle config-data glance httpd-run internal-tls-certs kube-api-access-87gdh logs scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/glance-default-internal-api-0" podUID="96e3bfa8-39a8-40a8-80c0-60d8af991c75" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.867367 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.875848 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-create-nff9n"] Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.877657 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-create-nff9n" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.888821 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-nff9n"] Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.893958 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-87gdh\" (UniqueName: \"kubernetes.io/projected/96e3bfa8-39a8-40a8-80c0-60d8af991c75-kube-api-access-87gdh\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.894005 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.894069 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/96e3bfa8-39a8-40a8-80c0-60d8af991c75-logs\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.894090 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.894117 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-config-data\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.894132 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/96e3bfa8-39a8-40a8-80c0-60d8af991c75-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.894151 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ee39750-6a0d-4094-a8a3-46fe0adca89b-logs\") pod \"horizon-7c95bbbd67-4qj7h\" (UID: \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\") " pod="openstack/horizon-7c95bbbd67-4qj7h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.894180 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3ee39750-6a0d-4094-a8a3-46fe0adca89b-scripts\") pod \"horizon-7c95bbbd67-4qj7h\" (UID: \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\") " pod="openstack/horizon-7c95bbbd67-4qj7h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.894220 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-scripts\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.894237 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3ee39750-6a0d-4094-a8a3-46fe0adca89b-horizon-secret-key\") pod \"horizon-7c95bbbd67-4qj7h\" (UID: \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\") " pod="openstack/horizon-7c95bbbd67-4qj7h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.894261 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3ee39750-6a0d-4094-a8a3-46fe0adca89b-config-data\") pod \"horizon-7c95bbbd67-4qj7h\" (UID: \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\") " pod="openstack/horizon-7c95bbbd67-4qj7h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.894301 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/96e3bfa8-39a8-40a8-80c0-60d8af991c75-ceph\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.894323 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.894361 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqg8v\" (UniqueName: \"kubernetes.io/projected/3ee39750-6a0d-4094-a8a3-46fe0adca89b-kube-api-access-mqg8v\") pod \"horizon-7c95bbbd67-4qj7h\" (UID: \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\") " pod="openstack/horizon-7c95bbbd67-4qj7h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.897361 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6dfdc6b877-4j92h"] Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.898041 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/96e3bfa8-39a8-40a8-80c0-60d8af991c75-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.898985 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6dfdc6b877-4j92h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.899603 4838 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.901189 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/96e3bfa8-39a8-40a8-80c0-60d8af991c75-logs\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.901280 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-scripts\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.902654 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.903478 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-config-data\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.903909 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/96e3bfa8-39a8-40a8-80c0-60d8af991c75-ceph\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.914785 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.919407 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-87gdh\" (UniqueName: \"kubernetes.io/projected/96e3bfa8-39a8-40a8-80c0-60d8af991c75-kube-api-access-87gdh\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.923869 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-ab9d-account-create-update-8jghk"] Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.925157 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-ab9d-account-create-update-8jghk" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.927859 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-db-secret" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.929705 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6dfdc6b877-4j92h"] Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.940828 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-ab9d-account-create-update-8jghk"] Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.944019 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.995933 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/12b8e3d4-b4f7-4179-83d3-a038f09e4682-logs\") pod \"horizon-6dfdc6b877-4j92h\" (UID: \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\") " pod="openstack/horizon-6dfdc6b877-4j92h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.996493 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqg8v\" (UniqueName: \"kubernetes.io/projected/3ee39750-6a0d-4094-a8a3-46fe0adca89b-kube-api-access-mqg8v\") pod \"horizon-7c95bbbd67-4qj7h\" (UID: \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\") " pod="openstack/horizon-7c95bbbd67-4qj7h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.996530 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0024b279-ca1c-4058-924a-0f044953dc17-operator-scripts\") pod \"manila-db-create-nff9n\" (UID: \"0024b279-ca1c-4058-924a-0f044953dc17\") " pod="openstack/manila-db-create-nff9n" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.996561 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgvt5\" (UniqueName: \"kubernetes.io/projected/12b8e3d4-b4f7-4179-83d3-a038f09e4682-kube-api-access-hgvt5\") pod \"horizon-6dfdc6b877-4j92h\" (UID: \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\") " pod="openstack/horizon-6dfdc6b877-4j92h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.996590 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/12b8e3d4-b4f7-4179-83d3-a038f09e4682-config-data\") pod \"horizon-6dfdc6b877-4j92h\" (UID: \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\") " pod="openstack/horizon-6dfdc6b877-4j92h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.996730 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82t8m\" (UniqueName: \"kubernetes.io/projected/0024b279-ca1c-4058-924a-0f044953dc17-kube-api-access-82t8m\") pod \"manila-db-create-nff9n\" (UID: \"0024b279-ca1c-4058-924a-0f044953dc17\") " pod="openstack/manila-db-create-nff9n" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.996758 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/3ee39750-6a0d-4094-a8a3-46fe0adca89b-logs\") pod \"horizon-7c95bbbd67-4qj7h\" (UID: \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\") " pod="openstack/horizon-7c95bbbd67-4qj7h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.996785 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/12b8e3d4-b4f7-4179-83d3-a038f09e4682-horizon-secret-key\") pod \"horizon-6dfdc6b877-4j92h\" (UID: \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\") " pod="openstack/horizon-6dfdc6b877-4j92h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.997144 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ee39750-6a0d-4094-a8a3-46fe0adca89b-logs\") pod \"horizon-7c95bbbd67-4qj7h\" (UID: \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\") " pod="openstack/horizon-7c95bbbd67-4qj7h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.997200 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3ee39750-6a0d-4094-a8a3-46fe0adca89b-scripts\") pod \"horizon-7c95bbbd67-4qj7h\" (UID: \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\") " pod="openstack/horizon-7c95bbbd67-4qj7h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.998783 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3ee39750-6a0d-4094-a8a3-46fe0adca89b-scripts\") pod \"horizon-7c95bbbd67-4qj7h\" (UID: \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\") " pod="openstack/horizon-7c95bbbd67-4qj7h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.998816 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/12b8e3d4-b4f7-4179-83d3-a038f09e4682-scripts\") pod \"horizon-6dfdc6b877-4j92h\" (UID: \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\") " pod="openstack/horizon-6dfdc6b877-4j92h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.998854 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3ee39750-6a0d-4094-a8a3-46fe0adca89b-horizon-secret-key\") pod \"horizon-7c95bbbd67-4qj7h\" (UID: \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\") " pod="openstack/horizon-7c95bbbd67-4qj7h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:51:59.998888 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3ee39750-6a0d-4094-a8a3-46fe0adca89b-config-data\") pod \"horizon-7c95bbbd67-4qj7h\" (UID: \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\") " pod="openstack/horizon-7c95bbbd67-4qj7h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.000092 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3ee39750-6a0d-4094-a8a3-46fe0adca89b-config-data\") pod \"horizon-7c95bbbd67-4qj7h\" (UID: \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\") " pod="openstack/horizon-7c95bbbd67-4qj7h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.001784 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3ee39750-6a0d-4094-a8a3-46fe0adca89b-horizon-secret-key\") pod \"horizon-7c95bbbd67-4qj7h\" (UID: \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\") " 
pod="openstack/horizon-7c95bbbd67-4qj7h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.013674 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqg8v\" (UniqueName: \"kubernetes.io/projected/3ee39750-6a0d-4094-a8a3-46fe0adca89b-kube-api-access-mqg8v\") pod \"horizon-7c95bbbd67-4qj7h\" (UID: \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\") " pod="openstack/horizon-7c95bbbd67-4qj7h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.063686 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7c95bbbd67-4qj7h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.102384 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcvpc\" (UniqueName: \"kubernetes.io/projected/5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f-kube-api-access-jcvpc\") pod \"manila-ab9d-account-create-update-8jghk\" (UID: \"5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f\") " pod="openstack/manila-ab9d-account-create-update-8jghk" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.102459 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/12b8e3d4-b4f7-4179-83d3-a038f09e4682-scripts\") pod \"horizon-6dfdc6b877-4j92h\" (UID: \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\") " pod="openstack/horizon-6dfdc6b877-4j92h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.102531 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/12b8e3d4-b4f7-4179-83d3-a038f09e4682-logs\") pod \"horizon-6dfdc6b877-4j92h\" (UID: \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\") " pod="openstack/horizon-6dfdc6b877-4j92h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.102566 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0024b279-ca1c-4058-924a-0f044953dc17-operator-scripts\") pod \"manila-db-create-nff9n\" (UID: \"0024b279-ca1c-4058-924a-0f044953dc17\") " pod="openstack/manila-db-create-nff9n" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.102593 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f-operator-scripts\") pod \"manila-ab9d-account-create-update-8jghk\" (UID: \"5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f\") " pod="openstack/manila-ab9d-account-create-update-8jghk" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.102611 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgvt5\" (UniqueName: \"kubernetes.io/projected/12b8e3d4-b4f7-4179-83d3-a038f09e4682-kube-api-access-hgvt5\") pod \"horizon-6dfdc6b877-4j92h\" (UID: \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\") " pod="openstack/horizon-6dfdc6b877-4j92h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.102633 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/12b8e3d4-b4f7-4179-83d3-a038f09e4682-config-data\") pod \"horizon-6dfdc6b877-4j92h\" (UID: \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\") " pod="openstack/horizon-6dfdc6b877-4j92h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.102671 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82t8m\" (UniqueName: 
\"kubernetes.io/projected/0024b279-ca1c-4058-924a-0f044953dc17-kube-api-access-82t8m\") pod \"manila-db-create-nff9n\" (UID: \"0024b279-ca1c-4058-924a-0f044953dc17\") " pod="openstack/manila-db-create-nff9n" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.102701 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/12b8e3d4-b4f7-4179-83d3-a038f09e4682-horizon-secret-key\") pod \"horizon-6dfdc6b877-4j92h\" (UID: \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\") " pod="openstack/horizon-6dfdc6b877-4j92h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.103693 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/12b8e3d4-b4f7-4179-83d3-a038f09e4682-logs\") pod \"horizon-6dfdc6b877-4j92h\" (UID: \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\") " pod="openstack/horizon-6dfdc6b877-4j92h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.103993 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/12b8e3d4-b4f7-4179-83d3-a038f09e4682-scripts\") pod \"horizon-6dfdc6b877-4j92h\" (UID: \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\") " pod="openstack/horizon-6dfdc6b877-4j92h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.104203 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/12b8e3d4-b4f7-4179-83d3-a038f09e4682-config-data\") pod \"horizon-6dfdc6b877-4j92h\" (UID: \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\") " pod="openstack/horizon-6dfdc6b877-4j92h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.104754 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0024b279-ca1c-4058-924a-0f044953dc17-operator-scripts\") pod \"manila-db-create-nff9n\" (UID: \"0024b279-ca1c-4058-924a-0f044953dc17\") " pod="openstack/manila-db-create-nff9n" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.115134 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/12b8e3d4-b4f7-4179-83d3-a038f09e4682-horizon-secret-key\") pod \"horizon-6dfdc6b877-4j92h\" (UID: \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\") " pod="openstack/horizon-6dfdc6b877-4j92h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.120013 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hgvt5\" (UniqueName: \"kubernetes.io/projected/12b8e3d4-b4f7-4179-83d3-a038f09e4682-kube-api-access-hgvt5\") pod \"horizon-6dfdc6b877-4j92h\" (UID: \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\") " pod="openstack/horizon-6dfdc6b877-4j92h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.124880 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82t8m\" (UniqueName: \"kubernetes.io/projected/0024b279-ca1c-4058-924a-0f044953dc17-kube-api-access-82t8m\") pod \"manila-db-create-nff9n\" (UID: \"0024b279-ca1c-4058-924a-0f044953dc17\") " pod="openstack/manila-db-create-nff9n" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.204529 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcvpc\" (UniqueName: \"kubernetes.io/projected/5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f-kube-api-access-jcvpc\") pod \"manila-ab9d-account-create-update-8jghk\" (UID: 
\"5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f\") " pod="openstack/manila-ab9d-account-create-update-8jghk" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.204976 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f-operator-scripts\") pod \"manila-ab9d-account-create-update-8jghk\" (UID: \"5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f\") " pod="openstack/manila-ab9d-account-create-update-8jghk" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.206241 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f-operator-scripts\") pod \"manila-ab9d-account-create-update-8jghk\" (UID: \"5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f\") " pod="openstack/manila-ab9d-account-create-update-8jghk" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.248680 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcvpc\" (UniqueName: \"kubernetes.io/projected/5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f-kube-api-access-jcvpc\") pod \"manila-ab9d-account-create-update-8jghk\" (UID: \"5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f\") " pod="openstack/manila-ab9d-account-create-update-8jghk" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.304268 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-nff9n" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.345327 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6dfdc6b877-4j92h" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.353374 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-ab9d-account-create-update-8jghk" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.495939 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.496725 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.519143 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.537115 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.545537 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.598219 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7c95bbbd67-4qj7h"] Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.616291 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-public-tls-certs\") pod \"80add234-36e8-4db8-a857-3bc8fcb0904a\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.616357 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/80add234-36e8-4db8-a857-3bc8fcb0904a-logs\") pod \"80add234-36e8-4db8-a857-3bc8fcb0904a\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.616411 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"80add234-36e8-4db8-a857-3bc8fcb0904a\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.616448 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-scripts\") pod \"80add234-36e8-4db8-a857-3bc8fcb0904a\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.616516 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/80add234-36e8-4db8-a857-3bc8fcb0904a-ceph\") pod \"80add234-36e8-4db8-a857-3bc8fcb0904a\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.616535 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/80add234-36e8-4db8-a857-3bc8fcb0904a-httpd-run\") pod \"80add234-36e8-4db8-a857-3bc8fcb0904a\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.616576 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-config-data\") pod \"80add234-36e8-4db8-a857-3bc8fcb0904a\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.616622 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-combined-ca-bundle\") pod \"80add234-36e8-4db8-a857-3bc8fcb0904a\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.616650 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2hr5m\" (UniqueName: \"kubernetes.io/projected/80add234-36e8-4db8-a857-3bc8fcb0904a-kube-api-access-2hr5m\") pod \"80add234-36e8-4db8-a857-3bc8fcb0904a\" (UID: \"80add234-36e8-4db8-a857-3bc8fcb0904a\") " Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.620641 4838 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80add234-36e8-4db8-a857-3bc8fcb0904a-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "80add234-36e8-4db8-a857-3bc8fcb0904a" (UID: "80add234-36e8-4db8-a857-3bc8fcb0904a"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.620947 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80add234-36e8-4db8-a857-3bc8fcb0904a-logs" (OuterVolumeSpecName: "logs") pod "80add234-36e8-4db8-a857-3bc8fcb0904a" (UID: "80add234-36e8-4db8-a857-3bc8fcb0904a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.643943 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-config-data" (OuterVolumeSpecName: "config-data") pod "80add234-36e8-4db8-a857-3bc8fcb0904a" (UID: "80add234-36e8-4db8-a857-3bc8fcb0904a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.644118 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "80add234-36e8-4db8-a857-3bc8fcb0904a" (UID: "80add234-36e8-4db8-a857-3bc8fcb0904a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.643969 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "80add234-36e8-4db8-a857-3bc8fcb0904a" (UID: "80add234-36e8-4db8-a857-3bc8fcb0904a"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.643980 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-scripts" (OuterVolumeSpecName: "scripts") pod "80add234-36e8-4db8-a857-3bc8fcb0904a" (UID: "80add234-36e8-4db8-a857-3bc8fcb0904a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.644061 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "80add234-36e8-4db8-a857-3bc8fcb0904a" (UID: "80add234-36e8-4db8-a857-3bc8fcb0904a"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.644706 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80add234-36e8-4db8-a857-3bc8fcb0904a-kube-api-access-2hr5m" (OuterVolumeSpecName: "kube-api-access-2hr5m") pod "80add234-36e8-4db8-a857-3bc8fcb0904a" (UID: "80add234-36e8-4db8-a857-3bc8fcb0904a"). InnerVolumeSpecName "kube-api-access-2hr5m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.648562 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80add234-36e8-4db8-a857-3bc8fcb0904a-ceph" (OuterVolumeSpecName: "ceph") pod "80add234-36e8-4db8-a857-3bc8fcb0904a" (UID: "80add234-36e8-4db8-a857-3bc8fcb0904a"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.691959 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-nff9n"] Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.719154 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-combined-ca-bundle\") pod \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.719267 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/96e3bfa8-39a8-40a8-80c0-60d8af991c75-httpd-run\") pod \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.719289 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-scripts\") pod \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.719307 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-internal-tls-certs\") pod \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.719335 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.719397 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-config-data\") pod \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.719413 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/96e3bfa8-39a8-40a8-80c0-60d8af991c75-ceph\") pod \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.719505 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-87gdh\" (UniqueName: \"kubernetes.io/projected/96e3bfa8-39a8-40a8-80c0-60d8af991c75-kube-api-access-87gdh\") pod \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.719524 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/96e3bfa8-39a8-40a8-80c0-60d8af991c75-logs\") pod \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\" (UID: \"96e3bfa8-39a8-40a8-80c0-60d8af991c75\") " Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.720409 4838 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/80add234-36e8-4db8-a857-3bc8fcb0904a-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.720425 4838 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/80add234-36e8-4db8-a857-3bc8fcb0904a-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.720434 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.720443 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.720452 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2hr5m\" (UniqueName: \"kubernetes.io/projected/80add234-36e8-4db8-a857-3bc8fcb0904a-kube-api-access-2hr5m\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.720462 4838 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.720470 4838 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/80add234-36e8-4db8-a857-3bc8fcb0904a-logs\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.720487 4838 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.720496 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/80add234-36e8-4db8-a857-3bc8fcb0904a-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.723644 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-scripts" (OuterVolumeSpecName: "scripts") pod "96e3bfa8-39a8-40a8-80c0-60d8af991c75" (UID: "96e3bfa8-39a8-40a8-80c0-60d8af991c75"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.725731 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/96e3bfa8-39a8-40a8-80c0-60d8af991c75-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "96e3bfa8-39a8-40a8-80c0-60d8af991c75" (UID: "96e3bfa8-39a8-40a8-80c0-60d8af991c75"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.727077 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/96e3bfa8-39a8-40a8-80c0-60d8af991c75-logs" (OuterVolumeSpecName: "logs") pod "96e3bfa8-39a8-40a8-80c0-60d8af991c75" (UID: "96e3bfa8-39a8-40a8-80c0-60d8af991c75"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.732745 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "96e3bfa8-39a8-40a8-80c0-60d8af991c75" (UID: "96e3bfa8-39a8-40a8-80c0-60d8af991c75"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.732805 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "96e3bfa8-39a8-40a8-80c0-60d8af991c75" (UID: "96e3bfa8-39a8-40a8-80c0-60d8af991c75"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.733337 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-config-data" (OuterVolumeSpecName: "config-data") pod "96e3bfa8-39a8-40a8-80c0-60d8af991c75" (UID: "96e3bfa8-39a8-40a8-80c0-60d8af991c75"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.734418 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "96e3bfa8-39a8-40a8-80c0-60d8af991c75" (UID: "96e3bfa8-39a8-40a8-80c0-60d8af991c75"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.737072 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96e3bfa8-39a8-40a8-80c0-60d8af991c75-kube-api-access-87gdh" (OuterVolumeSpecName: "kube-api-access-87gdh") pod "96e3bfa8-39a8-40a8-80c0-60d8af991c75" (UID: "96e3bfa8-39a8-40a8-80c0-60d8af991c75"). InnerVolumeSpecName "kube-api-access-87gdh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.747991 4838 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.752323 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96e3bfa8-39a8-40a8-80c0-60d8af991c75-ceph" (OuterVolumeSpecName: "ceph") pod "96e3bfa8-39a8-40a8-80c0-60d8af991c75" (UID: "96e3bfa8-39a8-40a8-80c0-60d8af991c75"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.822880 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-87gdh\" (UniqueName: \"kubernetes.io/projected/96e3bfa8-39a8-40a8-80c0-60d8af991c75-kube-api-access-87gdh\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.822908 4838 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/96e3bfa8-39a8-40a8-80c0-60d8af991c75-logs\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.822919 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.822931 4838 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.822939 4838 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/96e3bfa8-39a8-40a8-80c0-60d8af991c75-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.822947 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.822955 4838 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.823755 4838 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.823772 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96e3bfa8-39a8-40a8-80c0-60d8af991c75-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.823782 4838 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/96e3bfa8-39a8-40a8-80c0-60d8af991c75-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.846414 4838 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.924901 4838 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:00 crc kubenswrapper[4838]: I1128 10:52:00.975073 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6dfdc6b877-4j92h"] Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.009600 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-ab9d-account-create-update-8jghk"] Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.511691 4838 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/horizon-6dfdc6b877-4j92h" event={"ID":"12b8e3d4-b4f7-4179-83d3-a038f09e4682","Type":"ContainerStarted","Data":"3aa30f439e13087727014b7effabb2e54e4acaf896187b24e0308d362a6a7a58"} Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.514533 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-nff9n" event={"ID":"0024b279-ca1c-4058-924a-0f044953dc17","Type":"ContainerStarted","Data":"27458f64ea8095cdf09caa7a1503a93964a837c051e4b16e675b91a98ba315e6"} Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.516361 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7c95bbbd67-4qj7h" event={"ID":"3ee39750-6a0d-4094-a8a3-46fe0adca89b","Type":"ContainerStarted","Data":"6818a6308eb11da60d655a942aa252e940270858f11f8f7d0bd52cdf58b6f580"} Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.518447 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"8525f0f2-643f-4177-a4f8-12ca22b43363","Type":"ContainerStarted","Data":"1855deca12eba39d2d4b356f4a80419e6de7a5e9de500add7baff86366d926e7"} Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.520450 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-ab9d-account-create-update-8jghk" event={"ID":"5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f","Type":"ContainerStarted","Data":"4d2f6e7222c542293ae5b6f587cdea1ca80ad5ae0c76e59b4a1dfc0165241f04"} Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.520515 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.520469 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.610016 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 10:52:01 crc kubenswrapper[4838]: W1128 10:52:01.616705 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5efcb033_775b_46d6_8c77_2bafc360c749.slice/crio-5e042e2f8f2b9c2f4886188366904f51da8a94eff0e2f6de710188402fa3564c WatchSource:0}: Error finding container 5e042e2f8f2b9c2f4886188366904f51da8a94eff0e2f6de710188402fa3564c: Status 404 returned error can't find the container with id 5e042e2f8f2b9c2f4886188366904f51da8a94eff0e2f6de710188402fa3564c Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.620839 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.631943 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.649803 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6dfdc6b877-4j92h"] Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.656736 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.658292 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.665378 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.665617 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.665728 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-d7h7r" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.665757 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.682978 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.701449 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.732344 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.740737 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.740834 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/477f5eef-1cdb-494d-b339-e562c650b1a3-logs\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.740856 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.740910 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/477f5eef-1cdb-494d-b339-e562c650b1a3-ceph\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.740978 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-config-data\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.741021 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-combined-ca-bundle\") pod \"glance-default-external-api-0\" 
(UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.741046 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pwl9j\" (UniqueName: \"kubernetes.io/projected/477f5eef-1cdb-494d-b339-e562c650b1a3-kube-api-access-pwl9j\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.741068 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/477f5eef-1cdb-494d-b339-e562c650b1a3-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.741098 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-scripts\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.750253 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-565ff4b848-b45gx"] Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.751942 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-565ff4b848-b45gx" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.756418 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.802967 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.804622 4838 util.go:30] "No sandbox for pod can be found. 
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.807230 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.808400 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.820005 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-565ff4b848-b45gx"]
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.832458 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.844159 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.844197 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86ftg\" (UniqueName: \"kubernetes.io/projected/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-kube-api-access-86ftg\") pod \"horizon-565ff4b848-b45gx\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " pod="openstack/horizon-565ff4b848-b45gx"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.844220 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-logs\") pod \"horizon-565ff4b848-b45gx\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " pod="openstack/horizon-565ff4b848-b45gx"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.844243 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-horizon-tls-certs\") pod \"horizon-565ff4b848-b45gx\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " pod="openstack/horizon-565ff4b848-b45gx"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.844261 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/477f5eef-1cdb-494d-b339-e562c650b1a3-ceph\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.844294 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-config-data\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.844325 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.844351 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pwl9j\" (UniqueName: \"kubernetes.io/projected/477f5eef-1cdb-494d-b339-e562c650b1a3-kube-api-access-pwl9j\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.844380 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/477f5eef-1cdb-494d-b339-e562c650b1a3-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.844407 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-scripts\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.844465 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-combined-ca-bundle\") pod \"horizon-565ff4b848-b45gx\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " pod="openstack/horizon-565ff4b848-b45gx"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.844483 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.844497 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-horizon-secret-key\") pod \"horizon-565ff4b848-b45gx\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " pod="openstack/horizon-565ff4b848-b45gx"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.844515 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-config-data\") pod \"horizon-565ff4b848-b45gx\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " pod="openstack/horizon-565ff4b848-b45gx"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.844539 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-scripts\") pod \"horizon-565ff4b848-b45gx\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " pod="openstack/horizon-565ff4b848-b45gx"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.844569 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/477f5eef-1cdb-494d-b339-e562c650b1a3-logs\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.848796 4838 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-external-api-0"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.854451 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/477f5eef-1cdb-494d-b339-e562c650b1a3-logs\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.854610 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/477f5eef-1cdb-494d-b339-e562c650b1a3-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.860436 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 10:52:01 crc kubenswrapper[4838]: E1128 10:52:01.861266 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ceph combined-ca-bundle config-data glance kube-api-access-pwl9j public-tls-certs scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/glance-default-external-api-0" podUID="477f5eef-1cdb-494d-b339-e562c650b1a3"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.864742 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/477f5eef-1cdb-494d-b339-e562c650b1a3-ceph\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.873604 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-config-data\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.874705 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.875376 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.876338 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-scripts\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.883524 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pwl9j\" (UniqueName: \"kubernetes.io/projected/477f5eef-1cdb-494d-b339-e562c650b1a3-kube-api-access-pwl9j\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.896030 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7c95bbbd67-4qj7h"]
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.915938 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 10:52:01 crc kubenswrapper[4838]: E1128 10:52:01.916859 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ceph combined-ca-bundle config-data glance httpd-run internal-tls-certs kube-api-access-g95tm logs scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/glance-default-internal-api-0" podUID="05a2665f-f0e0-433f-a83a-f094b2673cdf"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.925528 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-77d65cd94d-8f62l"]
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.927187 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-77d65cd94d-8f62l"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.936626 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-77d65cd94d-8f62l"]
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.951158 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-scripts\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.951237 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g95tm\" (UniqueName: \"kubernetes.io/projected/05a2665f-f0e0-433f-a83a-f094b2673cdf-kube-api-access-g95tm\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.951275 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.951312 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0"
Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.951342 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-config-data\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0"
pod="openstack/glance-default-internal-api-0" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.951362 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-combined-ca-bundle\") pod \"horizon-565ff4b848-b45gx\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " pod="openstack/horizon-565ff4b848-b45gx" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.951380 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-horizon-secret-key\") pod \"horizon-565ff4b848-b45gx\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " pod="openstack/horizon-565ff4b848-b45gx" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.951396 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/05a2665f-f0e0-433f-a83a-f094b2673cdf-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.951415 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-config-data\") pod \"horizon-565ff4b848-b45gx\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " pod="openstack/horizon-565ff4b848-b45gx" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.951438 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.951469 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-scripts\") pod \"horizon-565ff4b848-b45gx\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " pod="openstack/horizon-565ff4b848-b45gx" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.951514 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86ftg\" (UniqueName: \"kubernetes.io/projected/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-kube-api-access-86ftg\") pod \"horizon-565ff4b848-b45gx\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " pod="openstack/horizon-565ff4b848-b45gx" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.951533 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-logs\") pod \"horizon-565ff4b848-b45gx\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " pod="openstack/horizon-565ff4b848-b45gx" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.951552 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-horizon-tls-certs\") pod \"horizon-565ff4b848-b45gx\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " pod="openstack/horizon-565ff4b848-b45gx" Nov 28 10:52:01 crc 
kubenswrapper[4838]: I1128 10:52:01.951584 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/05a2665f-f0e0-433f-a83a-f094b2673cdf-ceph\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.951634 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/05a2665f-f0e0-433f-a83a-f094b2673cdf-logs\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.952392 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-logs\") pod \"horizon-565ff4b848-b45gx\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " pod="openstack/horizon-565ff4b848-b45gx" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.952626 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-scripts\") pod \"horizon-565ff4b848-b45gx\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " pod="openstack/horizon-565ff4b848-b45gx" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.952894 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-config-data\") pod \"horizon-565ff4b848-b45gx\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " pod="openstack/horizon-565ff4b848-b45gx" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.955141 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-horizon-secret-key\") pod \"horizon-565ff4b848-b45gx\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " pod="openstack/horizon-565ff4b848-b45gx" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.955249 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-combined-ca-bundle\") pod \"horizon-565ff4b848-b45gx\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " pod="openstack/horizon-565ff4b848-b45gx" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.956269 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-horizon-tls-certs\") pod \"horizon-565ff4b848-b45gx\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " pod="openstack/horizon-565ff4b848-b45gx" Nov 28 10:52:01 crc kubenswrapper[4838]: I1128 10:52:01.965835 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86ftg\" (UniqueName: \"kubernetes.io/projected/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-kube-api-access-86ftg\") pod \"horizon-565ff4b848-b45gx\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " pod="openstack/horizon-565ff4b848-b45gx" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.050051 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.059570 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/05a2665f-f0e0-433f-a83a-f094b2673cdf-logs\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.059635 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-scripts\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.059669 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/97cbb2f0-d45e-4b75-ad50-becba9e4db9b-config-data\") pod \"horizon-77d65cd94d-8f62l\" (UID: \"97cbb2f0-d45e-4b75-ad50-becba9e4db9b\") " pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.059699 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g95tm\" (UniqueName: \"kubernetes.io/projected/05a2665f-f0e0-433f-a83a-f094b2673cdf-kube-api-access-g95tm\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.059781 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97cbb2f0-d45e-4b75-ad50-becba9e4db9b-logs\") pod \"horizon-77d65cd94d-8f62l\" (UID: \"97cbb2f0-d45e-4b75-ad50-becba9e4db9b\") " pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.059812 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/97cbb2f0-d45e-4b75-ad50-becba9e4db9b-horizon-secret-key\") pod \"horizon-77d65cd94d-8f62l\" (UID: \"97cbb2f0-d45e-4b75-ad50-becba9e4db9b\") " pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.059839 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.059888 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.059915 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-config-data\") pod 
\"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.059935 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/05a2665f-f0e0-433f-a83a-f094b2673cdf-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.059965 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.060015 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97cbb2f0-d45e-4b75-ad50-becba9e4db9b-combined-ca-bundle\") pod \"horizon-77d65cd94d-8f62l\" (UID: \"97cbb2f0-d45e-4b75-ad50-becba9e4db9b\") " pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.060056 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/97cbb2f0-d45e-4b75-ad50-becba9e4db9b-horizon-tls-certs\") pod \"horizon-77d65cd94d-8f62l\" (UID: \"97cbb2f0-d45e-4b75-ad50-becba9e4db9b\") " pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.060104 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/05a2665f-f0e0-433f-a83a-f094b2673cdf-ceph\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.060156 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dh8g\" (UniqueName: \"kubernetes.io/projected/97cbb2f0-d45e-4b75-ad50-becba9e4db9b-kube-api-access-9dh8g\") pod \"horizon-77d65cd94d-8f62l\" (UID: \"97cbb2f0-d45e-4b75-ad50-becba9e4db9b\") " pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.060216 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/97cbb2f0-d45e-4b75-ad50-becba9e4db9b-scripts\") pod \"horizon-77d65cd94d-8f62l\" (UID: \"97cbb2f0-d45e-4b75-ad50-becba9e4db9b\") " pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.063043 4838 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.068087 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/05a2665f-f0e0-433f-a83a-f094b2673cdf-logs\") pod 
\"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.068178 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/05a2665f-f0e0-433f-a83a-f094b2673cdf-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.068376 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-scripts\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.074389 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/05a2665f-f0e0-433f-a83a-f094b2673cdf-ceph\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.075097 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.075176 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.077207 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-config-data\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.080558 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-565ff4b848-b45gx" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.084901 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g95tm\" (UniqueName: \"kubernetes.io/projected/05a2665f-f0e0-433f-a83a-f094b2673cdf-kube-api-access-g95tm\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.090372 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.161768 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/97cbb2f0-d45e-4b75-ad50-becba9e4db9b-config-data\") pod \"horizon-77d65cd94d-8f62l\" (UID: \"97cbb2f0-d45e-4b75-ad50-becba9e4db9b\") " pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.161824 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97cbb2f0-d45e-4b75-ad50-becba9e4db9b-logs\") pod \"horizon-77d65cd94d-8f62l\" (UID: \"97cbb2f0-d45e-4b75-ad50-becba9e4db9b\") " pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.161861 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/97cbb2f0-d45e-4b75-ad50-becba9e4db9b-horizon-secret-key\") pod \"horizon-77d65cd94d-8f62l\" (UID: \"97cbb2f0-d45e-4b75-ad50-becba9e4db9b\") " pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.161961 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97cbb2f0-d45e-4b75-ad50-becba9e4db9b-combined-ca-bundle\") pod \"horizon-77d65cd94d-8f62l\" (UID: \"97cbb2f0-d45e-4b75-ad50-becba9e4db9b\") " pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.161998 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/97cbb2f0-d45e-4b75-ad50-becba9e4db9b-horizon-tls-certs\") pod \"horizon-77d65cd94d-8f62l\" (UID: \"97cbb2f0-d45e-4b75-ad50-becba9e4db9b\") " pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.162084 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dh8g\" (UniqueName: \"kubernetes.io/projected/97cbb2f0-d45e-4b75-ad50-becba9e4db9b-kube-api-access-9dh8g\") pod \"horizon-77d65cd94d-8f62l\" (UID: \"97cbb2f0-d45e-4b75-ad50-becba9e4db9b\") " pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.162111 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/97cbb2f0-d45e-4b75-ad50-becba9e4db9b-scripts\") pod \"horizon-77d65cd94d-8f62l\" (UID: \"97cbb2f0-d45e-4b75-ad50-becba9e4db9b\") " pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.166599 4838 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97cbb2f0-d45e-4b75-ad50-becba9e4db9b-logs\") pod \"horizon-77d65cd94d-8f62l\" (UID: \"97cbb2f0-d45e-4b75-ad50-becba9e4db9b\") " pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.166683 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/97cbb2f0-d45e-4b75-ad50-becba9e4db9b-scripts\") pod \"horizon-77d65cd94d-8f62l\" (UID: \"97cbb2f0-d45e-4b75-ad50-becba9e4db9b\") " pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.167604 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/97cbb2f0-d45e-4b75-ad50-becba9e4db9b-config-data\") pod \"horizon-77d65cd94d-8f62l\" (UID: \"97cbb2f0-d45e-4b75-ad50-becba9e4db9b\") " pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.174457 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/97cbb2f0-d45e-4b75-ad50-becba9e4db9b-horizon-secret-key\") pod \"horizon-77d65cd94d-8f62l\" (UID: \"97cbb2f0-d45e-4b75-ad50-becba9e4db9b\") " pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.174604 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97cbb2f0-d45e-4b75-ad50-becba9e4db9b-combined-ca-bundle\") pod \"horizon-77d65cd94d-8f62l\" (UID: \"97cbb2f0-d45e-4b75-ad50-becba9e4db9b\") " pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.174827 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/97cbb2f0-d45e-4b75-ad50-becba9e4db9b-horizon-tls-certs\") pod \"horizon-77d65cd94d-8f62l\" (UID: \"97cbb2f0-d45e-4b75-ad50-becba9e4db9b\") " pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.180059 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dh8g\" (UniqueName: \"kubernetes.io/projected/97cbb2f0-d45e-4b75-ad50-becba9e4db9b-kube-api-access-9dh8g\") pod \"horizon-77d65cd94d-8f62l\" (UID: \"97cbb2f0-d45e-4b75-ad50-becba9e4db9b\") " pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.392230 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.544371 4838 generic.go:334] "Generic (PLEG): container finished" podID="0024b279-ca1c-4058-924a-0f044953dc17" containerID="b1bc8f10c2e92416696446754dcb7629ee3bcc846892660caba543a47ab0b4c1" exitCode=0 Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.544523 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-nff9n" event={"ID":"0024b279-ca1c-4058-924a-0f044953dc17","Type":"ContainerDied","Data":"b1bc8f10c2e92416696446754dcb7629ee3bcc846892660caba543a47ab0b4c1"} Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.546642 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-ab9d-account-create-update-8jghk" event={"ID":"5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f","Type":"ContainerStarted","Data":"52f9c287a626abca89cb78e5731081da2e9b221ab051e1170200679c0ef6d313"} Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.552671 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.553525 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"5efcb033-775b-46d6-8c77-2bafc360c749","Type":"ContainerStarted","Data":"5e042e2f8f2b9c2f4886188366904f51da8a94eff0e2f6de710188402fa3564c"} Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.553580 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.595145 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.598965 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80add234-36e8-4db8-a857-3bc8fcb0904a" path="/var/lib/kubelet/pods/80add234-36e8-4db8-a857-3bc8fcb0904a/volumes" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.599706 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96e3bfa8-39a8-40a8-80c0-60d8af991c75" path="/var/lib/kubelet/pods/96e3bfa8-39a8-40a8-80c0-60d8af991c75/volumes" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.602291 4838 util.go:30] "No sandbox for pod can be found. 
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.605706 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-ab9d-account-create-update-8jghk" podStartSLOduration=3.603929397 podStartE2EDuration="3.603929397s" podCreationTimestamp="2025-11-28 10:51:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:52:02.590724768 +0000 UTC m=+3294.289698938" watchObservedRunningTime="2025-11-28 10:52:02.603929397 +0000 UTC m=+3294.302903567"
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.678768 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/05a2665f-f0e0-433f-a83a-f094b2673cdf-httpd-run\") pod \"05a2665f-f0e0-433f-a83a-f094b2673cdf\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") "
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.678902 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"477f5eef-1cdb-494d-b339-e562c650b1a3\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") "
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.678936 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/05a2665f-f0e0-433f-a83a-f094b2673cdf-ceph\") pod \"05a2665f-f0e0-433f-a83a-f094b2673cdf\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") "
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.679010 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-scripts\") pod \"05a2665f-f0e0-433f-a83a-f094b2673cdf\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") "
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.679074 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-internal-tls-certs\") pod \"05a2665f-f0e0-433f-a83a-f094b2673cdf\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") "
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.679107 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-combined-ca-bundle\") pod \"477f5eef-1cdb-494d-b339-e562c650b1a3\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") "
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.679126 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"05a2665f-f0e0-433f-a83a-f094b2673cdf\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") "
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.679144 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pwl9j\" (UniqueName: \"kubernetes.io/projected/477f5eef-1cdb-494d-b339-e562c650b1a3-kube-api-access-pwl9j\") pod \"477f5eef-1cdb-494d-b339-e562c650b1a3\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") "
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.679194 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-public-tls-certs\") pod \"477f5eef-1cdb-494d-b339-e562c650b1a3\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") "
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.679214 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g95tm\" (UniqueName: \"kubernetes.io/projected/05a2665f-f0e0-433f-a83a-f094b2673cdf-kube-api-access-g95tm\") pod \"05a2665f-f0e0-433f-a83a-f094b2673cdf\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") "
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.679234 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-scripts\") pod \"477f5eef-1cdb-494d-b339-e562c650b1a3\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") "
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.679260 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-config-data\") pod \"05a2665f-f0e0-433f-a83a-f094b2673cdf\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") "
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.679359 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/477f5eef-1cdb-494d-b339-e562c650b1a3-httpd-run\") pod \"477f5eef-1cdb-494d-b339-e562c650b1a3\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") "
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.679404 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-config-data\") pod \"477f5eef-1cdb-494d-b339-e562c650b1a3\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") "
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.679419 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/477f5eef-1cdb-494d-b339-e562c650b1a3-logs\") pod \"477f5eef-1cdb-494d-b339-e562c650b1a3\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") "
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.679459 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-combined-ca-bundle\") pod \"05a2665f-f0e0-433f-a83a-f094b2673cdf\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") "
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.679497 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/477f5eef-1cdb-494d-b339-e562c650b1a3-ceph\") pod \"477f5eef-1cdb-494d-b339-e562c650b1a3\" (UID: \"477f5eef-1cdb-494d-b339-e562c650b1a3\") "
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.679526 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/05a2665f-f0e0-433f-a83a-f094b2673cdf-logs\") pod \"05a2665f-f0e0-433f-a83a-f094b2673cdf\" (UID: \"05a2665f-f0e0-433f-a83a-f094b2673cdf\") "
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.681960 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/477f5eef-1cdb-494d-b339-e562c650b1a3-logs" (OuterVolumeSpecName: "logs") pod "477f5eef-1cdb-494d-b339-e562c650b1a3" (UID: "477f5eef-1cdb-494d-b339-e562c650b1a3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.682581 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/477f5eef-1cdb-494d-b339-e562c650b1a3-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "477f5eef-1cdb-494d-b339-e562c650b1a3" (UID: "477f5eef-1cdb-494d-b339-e562c650b1a3"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.690161 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05a2665f-f0e0-433f-a83a-f094b2673cdf-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "05a2665f-f0e0-433f-a83a-f094b2673cdf" (UID: "05a2665f-f0e0-433f-a83a-f094b2673cdf"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.703404 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05a2665f-f0e0-433f-a83a-f094b2673cdf-kube-api-access-g95tm" (OuterVolumeSpecName: "kube-api-access-g95tm") pod "05a2665f-f0e0-433f-a83a-f094b2673cdf" (UID: "05a2665f-f0e0-433f-a83a-f094b2673cdf"). InnerVolumeSpecName "kube-api-access-g95tm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.703556 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-scripts" (OuterVolumeSpecName: "scripts") pod "477f5eef-1cdb-494d-b339-e562c650b1a3" (UID: "477f5eef-1cdb-494d-b339-e562c650b1a3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.703898 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05a2665f-f0e0-433f-a83a-f094b2673cdf-logs" (OuterVolumeSpecName: "logs") pod "05a2665f-f0e0-433f-a83a-f094b2673cdf" (UID: "05a2665f-f0e0-433f-a83a-f094b2673cdf"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.709813 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/477f5eef-1cdb-494d-b339-e562c650b1a3-ceph" (OuterVolumeSpecName: "ceph") pod "477f5eef-1cdb-494d-b339-e562c650b1a3" (UID: "477f5eef-1cdb-494d-b339-e562c650b1a3"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.719957 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05a2665f-f0e0-433f-a83a-f094b2673cdf-ceph" (OuterVolumeSpecName: "ceph") pod "05a2665f-f0e0-433f-a83a-f094b2673cdf" (UID: "05a2665f-f0e0-433f-a83a-f094b2673cdf"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.736944 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "05a2665f-f0e0-433f-a83a-f094b2673cdf" (UID: "05a2665f-f0e0-433f-a83a-f094b2673cdf"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.739298 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-565ff4b848-b45gx"] Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.751917 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "477f5eef-1cdb-494d-b339-e562c650b1a3" (UID: "477f5eef-1cdb-494d-b339-e562c650b1a3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.752026 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "05a2665f-f0e0-433f-a83a-f094b2673cdf" (UID: "05a2665f-f0e0-433f-a83a-f094b2673cdf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.754872 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-config-data" (OuterVolumeSpecName: "config-data") pod "477f5eef-1cdb-494d-b339-e562c650b1a3" (UID: "477f5eef-1cdb-494d-b339-e562c650b1a3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.755853 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-config-data" (OuterVolumeSpecName: "config-data") pod "05a2665f-f0e0-433f-a83a-f094b2673cdf" (UID: "05a2665f-f0e0-433f-a83a-f094b2673cdf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.770172 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-scripts" (OuterVolumeSpecName: "scripts") pod "05a2665f-f0e0-433f-a83a-f094b2673cdf" (UID: "05a2665f-f0e0-433f-a83a-f094b2673cdf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.773828 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/477f5eef-1cdb-494d-b339-e562c650b1a3-kube-api-access-pwl9j" (OuterVolumeSpecName: "kube-api-access-pwl9j") pod "477f5eef-1cdb-494d-b339-e562c650b1a3" (UID: "477f5eef-1cdb-494d-b339-e562c650b1a3"). InnerVolumeSpecName "kube-api-access-pwl9j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.773962 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "477f5eef-1cdb-494d-b339-e562c650b1a3" (UID: "477f5eef-1cdb-494d-b339-e562c650b1a3"). InnerVolumeSpecName "local-storage04-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.774063 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "05a2665f-f0e0-433f-a83a-f094b2673cdf" (UID: "05a2665f-f0e0-433f-a83a-f094b2673cdf"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.782708 4838 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/05a2665f-f0e0-433f-a83a-f094b2673cdf-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.782859 4838 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.782870 4838 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/05a2665f-f0e0-433f-a83a-f094b2673cdf-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.782879 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.782888 4838 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.782897 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.782911 4838 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.782919 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pwl9j\" (UniqueName: \"kubernetes.io/projected/477f5eef-1cdb-494d-b339-e562c650b1a3-kube-api-access-pwl9j\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.782931 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g95tm\" (UniqueName: \"kubernetes.io/projected/05a2665f-f0e0-433f-a83a-f094b2673cdf-kube-api-access-g95tm\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.782940 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.782948 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.782956 4838 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/477f5eef-1cdb-494d-b339-e562c650b1a3-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.782964 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.782974 4838 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/477f5eef-1cdb-494d-b339-e562c650b1a3-logs\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.782986 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05a2665f-f0e0-433f-a83a-f094b2673cdf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.782997 4838 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/477f5eef-1cdb-494d-b339-e562c650b1a3-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.783010 4838 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/05a2665f-f0e0-433f-a83a-f094b2673cdf-logs\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.798473 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "477f5eef-1cdb-494d-b339-e562c650b1a3" (UID: "477f5eef-1cdb-494d-b339-e562c650b1a3"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.885246 4838 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/477f5eef-1cdb-494d-b339-e562c650b1a3-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.895066 4838 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.903324 4838 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.994692 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-77d65cd94d-8f62l"] Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.996020 4838 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:02 crc kubenswrapper[4838]: I1128 10:52:02.996074 4838 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.562386 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:52:03 crc kubenswrapper[4838]: E1128 10:52:03.563269 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.564048 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"8525f0f2-643f-4177-a4f8-12ca22b43363","Type":"ContainerStarted","Data":"c48b1575e14d883d6414141f844367359f30c78d3e3319db443fcb4cdffcf754"} Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.565609 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-77d65cd94d-8f62l" event={"ID":"97cbb2f0-d45e-4b75-ad50-becba9e4db9b","Type":"ContainerStarted","Data":"e89729e59cf23b32188bcf3b25612600dc1cba121fc0253661a84259a7d5fe9d"} Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.567420 4838 generic.go:334] "Generic (PLEG): container finished" podID="5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f" containerID="52f9c287a626abca89cb78e5731081da2e9b221ab051e1170200679c0ef6d313" exitCode=0 Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.567454 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-ab9d-account-create-update-8jghk" event={"ID":"5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f","Type":"ContainerDied","Data":"52f9c287a626abca89cb78e5731081da2e9b221ab051e1170200679c0ef6d313"} Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.570258 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-565ff4b848-b45gx" event={"ID":"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2","Type":"ContainerStarted","Data":"ab6d910e4d5a60a4f62fe7c8ad4e3a565a1b9b0737bf9137fcab68f1b4456b85"} Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.570354 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.570354 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.665504 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.677927 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.699160 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.702627 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.704142 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.705974 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-d7h7r" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.706101 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.706340 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.713732 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.720439 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.729174 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.737137 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.739342 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.746643 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.747350 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.765870 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.816682 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89b4bc38-621a-4f06-acb9-a59089d304c1-logs\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.816779 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/89b4bc38-621a-4f06-acb9-a59089d304c1-ceph\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.816795 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65afdfae-6cab-4f19-9c41-49b9409a7352-logs\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.816812 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod 
\"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.816828 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/65afdfae-6cab-4f19-9c41-49b9409a7352-ceph\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.816852 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/65afdfae-6cab-4f19-9c41-49b9409a7352-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.816911 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89b4bc38-621a-4f06-acb9-a59089d304c1-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.817003 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89b4bc38-621a-4f06-acb9-a59089d304c1-scripts\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.817025 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65afdfae-6cab-4f19-9c41-49b9409a7352-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.817056 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dgmc\" (UniqueName: \"kubernetes.io/projected/65afdfae-6cab-4f19-9c41-49b9409a7352-kube-api-access-6dgmc\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.817093 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.817131 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2rns\" (UniqueName: \"kubernetes.io/projected/89b4bc38-621a-4f06-acb9-a59089d304c1-kube-api-access-c2rns\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.817272 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/89b4bc38-621a-4f06-acb9-a59089d304c1-config-data\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.817400 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65afdfae-6cab-4f19-9c41-49b9409a7352-scripts\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.817528 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/65afdfae-6cab-4f19-9c41-49b9409a7352-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.817651 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/89b4bc38-621a-4f06-acb9-a59089d304c1-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.817690 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/89b4bc38-621a-4f06-acb9-a59089d304c1-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.817932 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65afdfae-6cab-4f19-9c41-49b9409a7352-config-data\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.920449 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89b4bc38-621a-4f06-acb9-a59089d304c1-logs\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.921062 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89b4bc38-621a-4f06-acb9-a59089d304c1-logs\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.921122 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/89b4bc38-621a-4f06-acb9-a59089d304c1-ceph\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.921142 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/65afdfae-6cab-4f19-9c41-49b9409a7352-logs\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.921548 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65afdfae-6cab-4f19-9c41-49b9409a7352-logs\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.921654 4838 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.922380 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.922420 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/65afdfae-6cab-4f19-9c41-49b9409a7352-ceph\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.922445 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/65afdfae-6cab-4f19-9c41-49b9409a7352-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.922475 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89b4bc38-621a-4f06-acb9-a59089d304c1-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.922519 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89b4bc38-621a-4f06-acb9-a59089d304c1-scripts\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.922535 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65afdfae-6cab-4f19-9c41-49b9409a7352-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.922558 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dgmc\" (UniqueName: \"kubernetes.io/projected/65afdfae-6cab-4f19-9c41-49b9409a7352-kube-api-access-6dgmc\") pod 
\"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.922601 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.922635 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2rns\" (UniqueName: \"kubernetes.io/projected/89b4bc38-621a-4f06-acb9-a59089d304c1-kube-api-access-c2rns\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.922657 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89b4bc38-621a-4f06-acb9-a59089d304c1-config-data\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.922697 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65afdfae-6cab-4f19-9c41-49b9409a7352-scripts\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.922805 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/65afdfae-6cab-4f19-9c41-49b9409a7352-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.922844 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/89b4bc38-621a-4f06-acb9-a59089d304c1-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.922861 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/89b4bc38-621a-4f06-acb9-a59089d304c1-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.922857 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/65afdfae-6cab-4f19-9c41-49b9409a7352-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.922885 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65afdfae-6cab-4f19-9c41-49b9409a7352-config-data\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " 
pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.922968 4838 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.923850 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/89b4bc38-621a-4f06-acb9-a59089d304c1-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.928878 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89b4bc38-621a-4f06-acb9-a59089d304c1-scripts\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.928976 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/89b4bc38-621a-4f06-acb9-a59089d304c1-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.929450 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89b4bc38-621a-4f06-acb9-a59089d304c1-config-data\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.932147 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/65afdfae-6cab-4f19-9c41-49b9409a7352-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.933028 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65afdfae-6cab-4f19-9c41-49b9409a7352-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.935090 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65afdfae-6cab-4f19-9c41-49b9409a7352-scripts\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.937312 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/89b4bc38-621a-4f06-acb9-a59089d304c1-ceph\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.938252 4838 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65afdfae-6cab-4f19-9c41-49b9409a7352-config-data\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.938535 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2rns\" (UniqueName: \"kubernetes.io/projected/89b4bc38-621a-4f06-acb9-a59089d304c1-kube-api-access-c2rns\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.938864 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89b4bc38-621a-4f06-acb9-a59089d304c1-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.939248 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/65afdfae-6cab-4f19-9c41-49b9409a7352-ceph\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.939549 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dgmc\" (UniqueName: \"kubernetes.io/projected/65afdfae-6cab-4f19-9c41-49b9409a7352-kube-api-access-6dgmc\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.957293 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"65afdfae-6cab-4f19-9c41-49b9409a7352\") " pod="openstack/glance-default-external-api-0" Nov 28 10:52:03 crc kubenswrapper[4838]: I1128 10:52:03.962065 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"89b4bc38-621a-4f06-acb9-a59089d304c1\") " pod="openstack/glance-default-internal-api-0" Nov 28 10:52:04 crc kubenswrapper[4838]: I1128 10:52:04.028428 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 10:52:04 crc kubenswrapper[4838]: I1128 10:52:04.058644 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 10:52:04 crc kubenswrapper[4838]: I1128 10:52:04.063531 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-create-nff9n" Nov 28 10:52:04 crc kubenswrapper[4838]: I1128 10:52:04.227956 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0024b279-ca1c-4058-924a-0f044953dc17-operator-scripts\") pod \"0024b279-ca1c-4058-924a-0f044953dc17\" (UID: \"0024b279-ca1c-4058-924a-0f044953dc17\") " Nov 28 10:52:04 crc kubenswrapper[4838]: I1128 10:52:04.228332 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-82t8m\" (UniqueName: \"kubernetes.io/projected/0024b279-ca1c-4058-924a-0f044953dc17-kube-api-access-82t8m\") pod \"0024b279-ca1c-4058-924a-0f044953dc17\" (UID: \"0024b279-ca1c-4058-924a-0f044953dc17\") " Nov 28 10:52:04 crc kubenswrapper[4838]: I1128 10:52:04.229273 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0024b279-ca1c-4058-924a-0f044953dc17-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0024b279-ca1c-4058-924a-0f044953dc17" (UID: "0024b279-ca1c-4058-924a-0f044953dc17"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:52:04 crc kubenswrapper[4838]: I1128 10:52:04.236330 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0024b279-ca1c-4058-924a-0f044953dc17-kube-api-access-82t8m" (OuterVolumeSpecName: "kube-api-access-82t8m") pod "0024b279-ca1c-4058-924a-0f044953dc17" (UID: "0024b279-ca1c-4058-924a-0f044953dc17"). InnerVolumeSpecName "kube-api-access-82t8m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:52:04 crc kubenswrapper[4838]: I1128 10:52:04.330623 4838 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0024b279-ca1c-4058-924a-0f044953dc17-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:04 crc kubenswrapper[4838]: I1128 10:52:04.330657 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-82t8m\" (UniqueName: \"kubernetes.io/projected/0024b279-ca1c-4058-924a-0f044953dc17-kube-api-access-82t8m\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:04 crc kubenswrapper[4838]: I1128 10:52:04.576167 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05a2665f-f0e0-433f-a83a-f094b2673cdf" path="/var/lib/kubelet/pods/05a2665f-f0e0-433f-a83a-f094b2673cdf/volumes" Nov 28 10:52:04 crc kubenswrapper[4838]: I1128 10:52:04.576979 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="477f5eef-1cdb-494d-b339-e562c650b1a3" path="/var/lib/kubelet/pods/477f5eef-1cdb-494d-b339-e562c650b1a3/volumes" Nov 28 10:52:04 crc kubenswrapper[4838]: I1128 10:52:04.588269 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-create-nff9n" Nov 28 10:52:04 crc kubenswrapper[4838]: I1128 10:52:04.588268 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-nff9n" event={"ID":"0024b279-ca1c-4058-924a-0f044953dc17","Type":"ContainerDied","Data":"27458f64ea8095cdf09caa7a1503a93964a837c051e4b16e675b91a98ba315e6"} Nov 28 10:52:04 crc kubenswrapper[4838]: I1128 10:52:04.588335 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="27458f64ea8095cdf09caa7a1503a93964a837c051e4b16e675b91a98ba315e6" Nov 28 10:52:04 crc kubenswrapper[4838]: I1128 10:52:04.722047 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 10:52:04 crc kubenswrapper[4838]: W1128 10:52:04.733768 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod65afdfae_6cab_4f19_9c41_49b9409a7352.slice/crio-00fd93fbd2ed5041f00eebb2c1b8d92f873c7dee706cff4ab1d37d221d8b1985 WatchSource:0}: Error finding container 00fd93fbd2ed5041f00eebb2c1b8d92f873c7dee706cff4ab1d37d221d8b1985: Status 404 returned error can't find the container with id 00fd93fbd2ed5041f00eebb2c1b8d92f873c7dee706cff4ab1d37d221d8b1985 Nov 28 10:52:04 crc kubenswrapper[4838]: I1128 10:52:04.964681 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-ab9d-account-create-update-8jghk" Nov 28 10:52:05 crc kubenswrapper[4838]: I1128 10:52:05.147615 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f-operator-scripts\") pod \"5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f\" (UID: \"5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f\") " Nov 28 10:52:05 crc kubenswrapper[4838]: I1128 10:52:05.147949 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jcvpc\" (UniqueName: \"kubernetes.io/projected/5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f-kube-api-access-jcvpc\") pod \"5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f\" (UID: \"5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f\") " Nov 28 10:52:05 crc kubenswrapper[4838]: I1128 10:52:05.148657 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f" (UID: "5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:52:05 crc kubenswrapper[4838]: I1128 10:52:05.163420 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f-kube-api-access-jcvpc" (OuterVolumeSpecName: "kube-api-access-jcvpc") pod "5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f" (UID: "5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f"). InnerVolumeSpecName "kube-api-access-jcvpc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:52:05 crc kubenswrapper[4838]: I1128 10:52:05.250973 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jcvpc\" (UniqueName: \"kubernetes.io/projected/5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f-kube-api-access-jcvpc\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:05 crc kubenswrapper[4838]: I1128 10:52:05.251431 4838 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:05 crc kubenswrapper[4838]: I1128 10:52:05.380128 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 10:52:05 crc kubenswrapper[4838]: I1128 10:52:05.626552 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-ab9d-account-create-update-8jghk" event={"ID":"5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f","Type":"ContainerDied","Data":"4d2f6e7222c542293ae5b6f587cdea1ca80ad5ae0c76e59b4a1dfc0165241f04"} Nov 28 10:52:05 crc kubenswrapper[4838]: I1128 10:52:05.626584 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-ab9d-account-create-update-8jghk" Nov 28 10:52:05 crc kubenswrapper[4838]: I1128 10:52:05.626591 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4d2f6e7222c542293ae5b6f587cdea1ca80ad5ae0c76e59b4a1dfc0165241f04" Nov 28 10:52:05 crc kubenswrapper[4838]: I1128 10:52:05.630461 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"65afdfae-6cab-4f19-9c41-49b9409a7352","Type":"ContainerStarted","Data":"a775b401d9ec454b70711f0286b404b4340c6e6bb25516548dbc3293512f76f0"} Nov 28 10:52:05 crc kubenswrapper[4838]: I1128 10:52:05.630496 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"65afdfae-6cab-4f19-9c41-49b9409a7352","Type":"ContainerStarted","Data":"00fd93fbd2ed5041f00eebb2c1b8d92f873c7dee706cff4ab1d37d221d8b1985"} Nov 28 10:52:05 crc kubenswrapper[4838]: I1128 10:52:05.634958 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"8525f0f2-643f-4177-a4f8-12ca22b43363","Type":"ContainerStarted","Data":"9e9f70be94842191e0b653234a2b92a8eb76a53c63bdcd0a25bc6c33eb35ff52"} Nov 28 10:52:05 crc kubenswrapper[4838]: I1128 10:52:05.660246 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-backup-0" podStartSLOduration=5.755183992 podStartE2EDuration="7.6602242s" podCreationTimestamp="2025-11-28 10:51:58 +0000 UTC" firstStartedPulling="2025-11-28 10:52:00.557905698 +0000 UTC m=+3292.256879868" lastFinishedPulling="2025-11-28 10:52:02.462945906 +0000 UTC m=+3294.161920076" observedRunningTime="2025-11-28 10:52:05.65543375 +0000 UTC m=+3297.354407920" watchObservedRunningTime="2025-11-28 10:52:05.6602242 +0000 UTC m=+3297.359198370" Nov 28 10:52:08 crc kubenswrapper[4838]: I1128 10:52:08.949732 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-backup-0" Nov 28 10:52:09 crc kubenswrapper[4838]: I1128 10:52:09.137656 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-backup-0" Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.316098 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-sync-rnkm7"] 
Nov 28 10:52:10 crc kubenswrapper[4838]: E1128 10:52:10.316822 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f" containerName="mariadb-account-create-update"
Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.316837 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f" containerName="mariadb-account-create-update"
Nov 28 10:52:10 crc kubenswrapper[4838]: E1128 10:52:10.316852 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0024b279-ca1c-4058-924a-0f044953dc17" containerName="mariadb-database-create"
Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.316858 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="0024b279-ca1c-4058-924a-0f044953dc17" containerName="mariadb-database-create"
Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.317049 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="0024b279-ca1c-4058-924a-0f044953dc17" containerName="mariadb-database-create"
Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.317069 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f" containerName="mariadb-account-create-update"
Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.317670 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-rnkm7"
Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.320970 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-c4j65"
Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.321253 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data"
Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.329934 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-rnkm7"]
Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.462894 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05628975-e8c2-42db-b5a0-dc9536eb3e75-config-data\") pod \"manila-db-sync-rnkm7\" (UID: \"05628975-e8c2-42db-b5a0-dc9536eb3e75\") " pod="openstack/manila-db-sync-rnkm7"
Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.462979 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7v6m7\" (UniqueName: \"kubernetes.io/projected/05628975-e8c2-42db-b5a0-dc9536eb3e75-kube-api-access-7v6m7\") pod \"manila-db-sync-rnkm7\" (UID: \"05628975-e8c2-42db-b5a0-dc9536eb3e75\") " pod="openstack/manila-db-sync-rnkm7"
Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.463093 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/05628975-e8c2-42db-b5a0-dc9536eb3e75-job-config-data\") pod \"manila-db-sync-rnkm7\" (UID: \"05628975-e8c2-42db-b5a0-dc9536eb3e75\") " pod="openstack/manila-db-sync-rnkm7"
Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.463212 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05628975-e8c2-42db-b5a0-dc9536eb3e75-combined-ca-bundle\") pod \"manila-db-sync-rnkm7\" (UID: \"05628975-e8c2-42db-b5a0-dc9536eb3e75\") " pod="openstack/manila-db-sync-rnkm7"
Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.564499 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/05628975-e8c2-42db-b5a0-dc9536eb3e75-job-config-data\") pod \"manila-db-sync-rnkm7\" (UID: \"05628975-e8c2-42db-b5a0-dc9536eb3e75\") " pod="openstack/manila-db-sync-rnkm7"
Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.564573 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05628975-e8c2-42db-b5a0-dc9536eb3e75-combined-ca-bundle\") pod \"manila-db-sync-rnkm7\" (UID: \"05628975-e8c2-42db-b5a0-dc9536eb3e75\") " pod="openstack/manila-db-sync-rnkm7"
Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.564614 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05628975-e8c2-42db-b5a0-dc9536eb3e75-config-data\") pod \"manila-db-sync-rnkm7\" (UID: \"05628975-e8c2-42db-b5a0-dc9536eb3e75\") " pod="openstack/manila-db-sync-rnkm7"
Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.564655 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7v6m7\" (UniqueName: \"kubernetes.io/projected/05628975-e8c2-42db-b5a0-dc9536eb3e75-kube-api-access-7v6m7\") pod \"manila-db-sync-rnkm7\" (UID: \"05628975-e8c2-42db-b5a0-dc9536eb3e75\") " pod="openstack/manila-db-sync-rnkm7"
Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.569142 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05628975-e8c2-42db-b5a0-dc9536eb3e75-combined-ca-bundle\") pod \"manila-db-sync-rnkm7\" (UID: \"05628975-e8c2-42db-b5a0-dc9536eb3e75\") " pod="openstack/manila-db-sync-rnkm7"
Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.569561 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/05628975-e8c2-42db-b5a0-dc9536eb3e75-job-config-data\") pod \"manila-db-sync-rnkm7\" (UID: \"05628975-e8c2-42db-b5a0-dc9536eb3e75\") " pod="openstack/manila-db-sync-rnkm7"
Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.569947 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05628975-e8c2-42db-b5a0-dc9536eb3e75-config-data\") pod \"manila-db-sync-rnkm7\" (UID: \"05628975-e8c2-42db-b5a0-dc9536eb3e75\") " pod="openstack/manila-db-sync-rnkm7"
Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.585700 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7v6m7\" (UniqueName: \"kubernetes.io/projected/05628975-e8c2-42db-b5a0-dc9536eb3e75-kube-api-access-7v6m7\") pod \"manila-db-sync-rnkm7\" (UID: \"05628975-e8c2-42db-b5a0-dc9536eb3e75\") " pod="openstack/manila-db-sync-rnkm7"
Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.667112 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-rnkm7"
Nov 28 10:52:10 crc kubenswrapper[4838]: I1128 10:52:10.692304 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"89b4bc38-621a-4f06-acb9-a59089d304c1","Type":"ContainerStarted","Data":"e75cbace01d8c2fed46d59f6ee9efea2fd226cc9215fecff35ffd97c7434b325"}
Nov 28 10:52:11 crc kubenswrapper[4838]: I1128 10:52:11.438376 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-rnkm7"]
Nov 28 10:52:11 crc kubenswrapper[4838]: W1128 10:52:11.445745 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod05628975_e8c2_42db_b5a0_dc9536eb3e75.slice/crio-5cb37bd9b1b6f02c2e02463ba113d246acf05fbe28044278b78b80cbe11e8683 WatchSource:0}: Error finding container 5cb37bd9b1b6f02c2e02463ba113d246acf05fbe28044278b78b80cbe11e8683: Status 404 returned error can't find the container with id 5cb37bd9b1b6f02c2e02463ba113d246acf05fbe28044278b78b80cbe11e8683
Nov 28 10:52:11 crc kubenswrapper[4838]: I1128 10:52:11.729106 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7c95bbbd67-4qj7h" event={"ID":"3ee39750-6a0d-4094-a8a3-46fe0adca89b","Type":"ContainerStarted","Data":"2157d9c578fd79a339c867ff1d8dcb621b3e51a485643ac293e5f9a6a61e3c58"}
Nov 28 10:52:11 crc kubenswrapper[4838]: I1128 10:52:11.730780 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-565ff4b848-b45gx" event={"ID":"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2","Type":"ContainerStarted","Data":"07b13478bd4bc4909ab58488a40ca59a947862e788cdd515fce2962940d87199"}
Nov 28 10:52:11 crc kubenswrapper[4838]: I1128 10:52:11.731479 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-rnkm7" event={"ID":"05628975-e8c2-42db-b5a0-dc9536eb3e75","Type":"ContainerStarted","Data":"5cb37bd9b1b6f02c2e02463ba113d246acf05fbe28044278b78b80cbe11e8683"}
Nov 28 10:52:11 crc kubenswrapper[4838]: I1128 10:52:11.739024 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"5efcb033-775b-46d6-8c77-2bafc360c749","Type":"ContainerStarted","Data":"857eee1ac5c58e228723c2d938a79bb67e230b2375a6fd9d708413a9d78232e0"}
Nov 28 10:52:11 crc kubenswrapper[4838]: I1128 10:52:11.745352 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"65afdfae-6cab-4f19-9c41-49b9409a7352","Type":"ContainerStarted","Data":"f334c8429707b49218093e9718ddd291ee5a96ea3bfd5759eb6814615f32d026"}
Nov 28 10:52:11 crc kubenswrapper[4838]: I1128 10:52:11.768286 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=8.768267493 podStartE2EDuration="8.768267493s" podCreationTimestamp="2025-11-28 10:52:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:52:11.768003466 +0000 UTC m=+3303.466977646" watchObservedRunningTime="2025-11-28 10:52:11.768267493 +0000 UTC m=+3303.467241663"
Nov 28 10:52:12 crc kubenswrapper[4838]: I1128 10:52:12.756742 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6dfdc6b877-4j92h" event={"ID":"12b8e3d4-b4f7-4179-83d3-a038f09e4682","Type":"ContainerStarted","Data":"9e844b64ce84e1d7f0621982ed710538c49fb1124e85b7ac63ba827a96c90e13"}
Nov 28 10:52:12 crc kubenswrapper[4838]: I1128 10:52:12.759062 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"5efcb033-775b-46d6-8c77-2bafc360c749","Type":"ContainerStarted","Data":"728e90fd72bc91c53299fa53567f2c66d85dab43fbb2b360dd71eee55951f921"}
Nov 28 10:52:12 crc kubenswrapper[4838]: I1128 10:52:12.761150 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"89b4bc38-621a-4f06-acb9-a59089d304c1","Type":"ContainerStarted","Data":"22f3afe2b967c6f99091acc3f3d32f5a946ca60fca27ed04879488704e880cfd"}
Nov 28 10:52:12 crc kubenswrapper[4838]: I1128 10:52:12.770381 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7c95bbbd67-4qj7h" event={"ID":"3ee39750-6a0d-4094-a8a3-46fe0adca89b","Type":"ContainerStarted","Data":"fc769ce7d3ce2a3c24558a35eaf0fb4fe219535d9899d70519dee6cff9aa4b33"}
Nov 28 10:52:12 crc kubenswrapper[4838]: I1128 10:52:12.770519 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7c95bbbd67-4qj7h" podUID="3ee39750-6a0d-4094-a8a3-46fe0adca89b" containerName="horizon-log" containerID="cri-o://2157d9c578fd79a339c867ff1d8dcb621b3e51a485643ac293e5f9a6a61e3c58" gracePeriod=30
Nov 28 10:52:12 crc kubenswrapper[4838]: I1128 10:52:12.770754 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7c95bbbd67-4qj7h" podUID="3ee39750-6a0d-4094-a8a3-46fe0adca89b" containerName="horizon" containerID="cri-o://fc769ce7d3ce2a3c24558a35eaf0fb4fe219535d9899d70519dee6cff9aa4b33" gracePeriod=30
Nov 28 10:52:12 crc kubenswrapper[4838]: I1128 10:52:12.773987 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-565ff4b848-b45gx" event={"ID":"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2","Type":"ContainerStarted","Data":"c46d2f5a288bc031deb50b4c4507cac1d9c85391a267a560fda8b5a38f93334f"}
Nov 28 10:52:12 crc kubenswrapper[4838]: I1128 10:52:12.777016 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-77d65cd94d-8f62l" event={"ID":"97cbb2f0-d45e-4b75-ad50-becba9e4db9b","Type":"ContainerStarted","Data":"eca39b04796ce1817aade7786eac4216e6410eb6fcd0fd8b1e6bd89938cd9fa9"}
Nov 28 10:52:12 crc kubenswrapper[4838]: I1128 10:52:12.777042 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-77d65cd94d-8f62l" event={"ID":"97cbb2f0-d45e-4b75-ad50-becba9e4db9b","Type":"ContainerStarted","Data":"2ab9f29122586104d11768b184f1c7abfd989fb03083c8e9687b8e0fa27d3ca4"}
Nov 28 10:52:12 crc kubenswrapper[4838]: I1128 10:52:12.797900 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-volume1-0" podStartSLOduration=5.444676034 podStartE2EDuration="14.797880513s" podCreationTimestamp="2025-11-28 10:51:58 +0000 UTC" firstStartedPulling="2025-11-28 10:52:01.637023862 +0000 UTC m=+3293.335998032" lastFinishedPulling="2025-11-28 10:52:10.990228301 +0000 UTC m=+3302.689202511" observedRunningTime="2025-11-28 10:52:12.792700702 +0000 UTC m=+3304.491674872" watchObservedRunningTime="2025-11-28 10:52:12.797880513 +0000 UTC m=+3304.496854683"
Nov 28 10:52:12 crc kubenswrapper[4838]: I1128 10:52:12.819445 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-7c95bbbd67-4qj7h" podStartSLOduration=3.433782214 podStartE2EDuration="13.819428739s" podCreationTimestamp="2025-11-28 10:51:59 +0000 UTC" firstStartedPulling="2025-11-28 10:52:00.606417116 +0000 UTC m=+3292.305391286" lastFinishedPulling="2025-11-28 10:52:10.992063631 +0000 UTC m=+3302.691037811" observedRunningTime="2025-11-28 10:52:12.811425442 +0000 UTC m=+3304.510399612" watchObservedRunningTime="2025-11-28 10:52:12.819428739 +0000 UTC m=+3304.518402909"
Nov 28 10:52:12 crc kubenswrapper[4838]: I1128 10:52:12.831943 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-565ff4b848-b45gx" podStartSLOduration=3.642215205 podStartE2EDuration="11.831922328s" podCreationTimestamp="2025-11-28 10:52:01 +0000 UTC" firstStartedPulling="2025-11-28 10:52:02.850756314 +0000 UTC m=+3294.549730484" lastFinishedPulling="2025-11-28 10:52:11.040463437 +0000 UTC m=+3302.739437607" observedRunningTime="2025-11-28 10:52:12.8297876 +0000 UTC m=+3304.528761770" watchObservedRunningTime="2025-11-28 10:52:12.831922328 +0000 UTC m=+3304.530896498"
Nov 28 10:52:13 crc kubenswrapper[4838]: I1128 10:52:13.927565 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-volume1-0"
Nov 28 10:52:14 crc kubenswrapper[4838]: I1128 10:52:14.059909 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 28 10:52:14 crc kubenswrapper[4838]: I1128 10:52:14.059978 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 28 10:52:14 crc kubenswrapper[4838]: I1128 10:52:14.419170 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 28 10:52:14 crc kubenswrapper[4838]: I1128 10:52:14.421649 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 28 10:52:14 crc kubenswrapper[4838]: I1128 10:52:14.809072 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"89b4bc38-621a-4f06-acb9-a59089d304c1","Type":"ContainerStarted","Data":"f1a37e2111a41ce58f7daea56d1afbb6af6765d95a648f3e781c6293845ab0e6"}
Nov 28 10:52:14 crc kubenswrapper[4838]: I1128 10:52:14.815751 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 28 10:52:14 crc kubenswrapper[4838]: I1128 10:52:14.816048 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6dfdc6b877-4j92h" podUID="12b8e3d4-b4f7-4179-83d3-a038f09e4682" containerName="horizon-log" containerID="cri-o://9e844b64ce84e1d7f0621982ed710538c49fb1124e85b7ac63ba827a96c90e13" gracePeriod=30
Nov 28 10:52:14 crc kubenswrapper[4838]: I1128 10:52:14.816099 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 28 10:52:14 crc kubenswrapper[4838]: I1128 10:52:14.817642 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6dfdc6b877-4j92h" podUID="12b8e3d4-b4f7-4179-83d3-a038f09e4682" containerName="horizon" containerID="cri-o://08c16acb87cf0772907a56f04ecf22c5d6e58669df25dd13e2ff4ee02b36df62" gracePeriod=30
Nov 28 10:52:14 crc kubenswrapper[4838]: I1128 10:52:14.862355 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=11.862328594 podStartE2EDuration="11.862328594s" podCreationTimestamp="2025-11-28 10:52:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:52:14.836210303 +0000 UTC m=+3306.535184483" watchObservedRunningTime="2025-11-28 10:52:14.862328594 +0000 UTC m=+3306.561302774"
Nov 28 10:52:14 crc kubenswrapper[4838]: I1128 10:52:14.880486 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-77d65cd94d-8f62l" podStartSLOduration=5.25971441 podStartE2EDuration="13.880464306s" podCreationTimestamp="2025-11-28 10:52:01 +0000 UTC" firstStartedPulling="2025-11-28 10:52:03.006030553 +0000 UTC m=+3294.705004723" lastFinishedPulling="2025-11-28 10:52:11.626780439 +0000 UTC m=+3303.325754619" observedRunningTime="2025-11-28 10:52:14.877619309 +0000 UTC m=+3306.576593479" watchObservedRunningTime="2025-11-28 10:52:14.880464306 +0000 UTC m=+3306.579438476"
Nov 28 10:52:14 crc kubenswrapper[4838]: I1128 10:52:14.908413 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-6dfdc6b877-4j92h" podStartSLOduration=4.737323546 podStartE2EDuration="15.908391615s" podCreationTimestamp="2025-11-28 10:51:59 +0000 UTC" firstStartedPulling="2025-11-28 10:52:00.985866297 +0000 UTC m=+3292.684840467" lastFinishedPulling="2025-11-28 10:52:12.156934356 +0000 UTC m=+3303.855908536" observedRunningTime="2025-11-28 10:52:14.900174072 +0000 UTC m=+3306.599148252" watchObservedRunningTime="2025-11-28 10:52:14.908391615 +0000 UTC m=+3306.607365785"
Nov 28 10:52:15 crc kubenswrapper[4838]: I1128 10:52:15.827516 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6dfdc6b877-4j92h" event={"ID":"12b8e3d4-b4f7-4179-83d3-a038f09e4682","Type":"ContainerStarted","Data":"08c16acb87cf0772907a56f04ecf22c5d6e58669df25dd13e2ff4ee02b36df62"}
Nov 28 10:52:16 crc kubenswrapper[4838]: I1128 10:52:16.834790 4838 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 28 10:52:17 crc kubenswrapper[4838]: I1128 10:52:17.562401 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b"
Nov 28 10:52:17 crc kubenswrapper[4838]: E1128 10:52:17.562823 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:52:18 crc kubenswrapper[4838]: I1128 10:52:18.951380 4838 scope.go:117] "RemoveContainer" containerID="2b7fdcf29b666f9440cc14693014caf46e129ab3364ad7a137ed92e31439c163"
Nov 28 10:52:19 crc kubenswrapper[4838]: I1128 10:52:19.135798 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-volume1-0"
Nov 28 10:52:20 crc kubenswrapper[4838]: I1128 10:52:20.064558 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7c95bbbd67-4qj7h"
Nov 28 10:52:20 crc kubenswrapper[4838]: I1128 10:52:20.345984 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6dfdc6b877-4j92h"
Nov 28 10:52:22 crc kubenswrapper[4838]: I1128 10:52:22.081211 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-565ff4b848-b45gx"
Nov 28 10:52:22 crc kubenswrapper[4838]: I1128 10:52:22.081902 4838 kubelet.go:2542] "SyncLoop (probe)"
probe="readiness" status="" pod="openstack/horizon-565ff4b848-b45gx" Nov 28 10:52:22 crc kubenswrapper[4838]: I1128 10:52:22.083415 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-565ff4b848-b45gx" podUID="580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.243:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.243:8443: connect: connection refused" Nov 28 10:52:22 crc kubenswrapper[4838]: I1128 10:52:22.392955 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:22 crc kubenswrapper[4838]: I1128 10:52:22.393040 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:22 crc kubenswrapper[4838]: I1128 10:52:22.395042 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-77d65cd94d-8f62l" podUID="97cbb2f0-d45e-4b75-ad50-becba9e4db9b" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.245:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.245:8443: connect: connection refused" Nov 28 10:52:22 crc kubenswrapper[4838]: I1128 10:52:22.694945 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 10:52:22 crc kubenswrapper[4838]: I1128 10:52:22.707572 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 10:52:24 crc kubenswrapper[4838]: I1128 10:52:24.029351 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 10:52:24 crc kubenswrapper[4838]: I1128 10:52:24.029753 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 10:52:24 crc kubenswrapper[4838]: I1128 10:52:24.084387 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 10:52:24 crc kubenswrapper[4838]: I1128 10:52:24.089642 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 10:52:24 crc kubenswrapper[4838]: I1128 10:52:24.922967 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 10:52:24 crc kubenswrapper[4838]: I1128 10:52:24.923393 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 10:52:26 crc kubenswrapper[4838]: I1128 10:52:26.844380 4838 scope.go:117] "RemoveContainer" containerID="d13a7806a35b07a38eb965878a64169d07f9e7858d26dcf505ad43938d18f09c" Nov 28 10:52:26 crc kubenswrapper[4838]: I1128 10:52:26.880336 4838 scope.go:117] "RemoveContainer" containerID="efcb7fb36498a498901db499fb7c2a695fb0923a6ef34391f71c7c4ffc7a367d" Nov 28 10:52:26 crc kubenswrapper[4838]: E1128 10:52:26.987792 4838 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-manila-api:current-podified" Nov 28 10:52:26 crc kubenswrapper[4838]: E1128 10:52:26.988325 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manila-db-sync,Image:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,Command:[/bin/bash],Args:[-c sleep 0 && /usr/bin/manila-manage --config-dir /etc/manila/manila.conf.d db sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:job-config-data,ReadOnly:true,MountPath:/etc/manila/manila.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7v6m7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42429,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42429,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-db-sync-rnkm7_openstack(05628975-e8c2-42db-b5a0-dc9536eb3e75): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 10:52:26 crc kubenswrapper[4838]: E1128 10:52:26.989664 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/manila-db-sync-rnkm7" podUID="05628975-e8c2-42db-b5a0-dc9536eb3e75" Nov 28 10:52:27 crc kubenswrapper[4838]: I1128 10:52:27.743824 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 10:52:27 crc kubenswrapper[4838]: I1128 10:52:27.744200 4838 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 10:52:27 crc kubenswrapper[4838]: I1128 10:52:27.745535 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 10:52:27 crc kubenswrapper[4838]: E1128 10:52:27.970131 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-manila-api:current-podified\\\"\"" pod="openstack/manila-db-sync-rnkm7" podUID="05628975-e8c2-42db-b5a0-dc9536eb3e75" Nov 28 10:52:29 crc kubenswrapper[4838]: I1128 10:52:29.563061 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:52:29 crc 
kubenswrapper[4838]: E1128 10:52:29.563681 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:52:32 crc kubenswrapper[4838]: I1128 10:52:32.081622 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-565ff4b848-b45gx" podUID="580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.243:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.243:8443: connect: connection refused" Nov 28 10:52:32 crc kubenswrapper[4838]: I1128 10:52:32.393784 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-77d65cd94d-8f62l" podUID="97cbb2f0-d45e-4b75-ad50-becba9e4db9b" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.245:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.245:8443: connect: connection refused" Nov 28 10:52:39 crc kubenswrapper[4838]: I1128 10:52:39.634887 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6gx67"] Nov 28 10:52:39 crc kubenswrapper[4838]: I1128 10:52:39.651160 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6gx67" Nov 28 10:52:39 crc kubenswrapper[4838]: I1128 10:52:39.681760 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6gx67"] Nov 28 10:52:39 crc kubenswrapper[4838]: I1128 10:52:39.753180 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbggl\" (UniqueName: \"kubernetes.io/projected/ae733bf5-694c-4e7a-9ea1-34d75ed1fec6-kube-api-access-xbggl\") pod \"redhat-operators-6gx67\" (UID: \"ae733bf5-694c-4e7a-9ea1-34d75ed1fec6\") " pod="openshift-marketplace/redhat-operators-6gx67" Nov 28 10:52:39 crc kubenswrapper[4838]: I1128 10:52:39.753235 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae733bf5-694c-4e7a-9ea1-34d75ed1fec6-catalog-content\") pod \"redhat-operators-6gx67\" (UID: \"ae733bf5-694c-4e7a-9ea1-34d75ed1fec6\") " pod="openshift-marketplace/redhat-operators-6gx67" Nov 28 10:52:39 crc kubenswrapper[4838]: I1128 10:52:39.753363 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae733bf5-694c-4e7a-9ea1-34d75ed1fec6-utilities\") pod \"redhat-operators-6gx67\" (UID: \"ae733bf5-694c-4e7a-9ea1-34d75ed1fec6\") " pod="openshift-marketplace/redhat-operators-6gx67" Nov 28 10:52:39 crc kubenswrapper[4838]: I1128 10:52:39.855410 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae733bf5-694c-4e7a-9ea1-34d75ed1fec6-utilities\") pod \"redhat-operators-6gx67\" (UID: \"ae733bf5-694c-4e7a-9ea1-34d75ed1fec6\") " pod="openshift-marketplace/redhat-operators-6gx67" Nov 28 10:52:39 crc kubenswrapper[4838]: I1128 10:52:39.855528 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-xbggl\" (UniqueName: \"kubernetes.io/projected/ae733bf5-694c-4e7a-9ea1-34d75ed1fec6-kube-api-access-xbggl\") pod \"redhat-operators-6gx67\" (UID: \"ae733bf5-694c-4e7a-9ea1-34d75ed1fec6\") " pod="openshift-marketplace/redhat-operators-6gx67" Nov 28 10:52:39 crc kubenswrapper[4838]: I1128 10:52:39.855547 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae733bf5-694c-4e7a-9ea1-34d75ed1fec6-catalog-content\") pod \"redhat-operators-6gx67\" (UID: \"ae733bf5-694c-4e7a-9ea1-34d75ed1fec6\") " pod="openshift-marketplace/redhat-operators-6gx67" Nov 28 10:52:39 crc kubenswrapper[4838]: I1128 10:52:39.856067 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae733bf5-694c-4e7a-9ea1-34d75ed1fec6-catalog-content\") pod \"redhat-operators-6gx67\" (UID: \"ae733bf5-694c-4e7a-9ea1-34d75ed1fec6\") " pod="openshift-marketplace/redhat-operators-6gx67" Nov 28 10:52:39 crc kubenswrapper[4838]: I1128 10:52:39.856457 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae733bf5-694c-4e7a-9ea1-34d75ed1fec6-utilities\") pod \"redhat-operators-6gx67\" (UID: \"ae733bf5-694c-4e7a-9ea1-34d75ed1fec6\") " pod="openshift-marketplace/redhat-operators-6gx67" Nov 28 10:52:39 crc kubenswrapper[4838]: I1128 10:52:39.877664 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbggl\" (UniqueName: \"kubernetes.io/projected/ae733bf5-694c-4e7a-9ea1-34d75ed1fec6-kube-api-access-xbggl\") pod \"redhat-operators-6gx67\" (UID: \"ae733bf5-694c-4e7a-9ea1-34d75ed1fec6\") " pod="openshift-marketplace/redhat-operators-6gx67" Nov 28 10:52:39 crc kubenswrapper[4838]: I1128 10:52:39.975969 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6gx67" Nov 28 10:52:40 crc kubenswrapper[4838]: I1128 10:52:40.087492 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-rnkm7" event={"ID":"05628975-e8c2-42db-b5a0-dc9536eb3e75","Type":"ContainerStarted","Data":"ba9491d4066fe9a83cc6e20b2fe20c40497d9126f96986013abddd70ab38a5d9"} Nov 28 10:52:40 crc kubenswrapper[4838]: I1128 10:52:40.111181 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-db-sync-rnkm7" podStartSLOduration=2.296527759 podStartE2EDuration="30.11116479s" podCreationTimestamp="2025-11-28 10:52:10 +0000 UTC" firstStartedPulling="2025-11-28 10:52:11.447784455 +0000 UTC m=+3303.146758625" lastFinishedPulling="2025-11-28 10:52:39.262421476 +0000 UTC m=+3330.961395656" observedRunningTime="2025-11-28 10:52:40.104625422 +0000 UTC m=+3331.803599592" watchObservedRunningTime="2025-11-28 10:52:40.11116479 +0000 UTC m=+3331.810138950" Nov 28 10:52:40 crc kubenswrapper[4838]: I1128 10:52:40.478658 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6gx67"] Nov 28 10:52:41 crc kubenswrapper[4838]: I1128 10:52:41.103916 4838 generic.go:334] "Generic (PLEG): container finished" podID="ae733bf5-694c-4e7a-9ea1-34d75ed1fec6" containerID="749cac8e221bff3977c38a3e35012fa1b8ebda61e0c10cda4e15c37773748334" exitCode=0 Nov 28 10:52:41 crc kubenswrapper[4838]: I1128 10:52:41.104174 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gx67" event={"ID":"ae733bf5-694c-4e7a-9ea1-34d75ed1fec6","Type":"ContainerDied","Data":"749cac8e221bff3977c38a3e35012fa1b8ebda61e0c10cda4e15c37773748334"} Nov 28 10:52:41 crc kubenswrapper[4838]: I1128 10:52:41.104201 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gx67" event={"ID":"ae733bf5-694c-4e7a-9ea1-34d75ed1fec6","Type":"ContainerStarted","Data":"fcbe26671a44097643303f999902cc35535e541c883dcc876aa894b68f4c6d73"} Nov 28 10:52:42 crc kubenswrapper[4838]: I1128 10:52:42.122939 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gx67" event={"ID":"ae733bf5-694c-4e7a-9ea1-34d75ed1fec6","Type":"ContainerStarted","Data":"1a6fdc945bb9857a53036ce5ce0b96af3192aa7bc06e48e023b8ae8ed9f02696"} Nov 28 10:52:42 crc kubenswrapper[4838]: I1128 10:52:42.564143 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:52:42 crc kubenswrapper[4838]: E1128 10:52:42.564954 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:52:43 crc kubenswrapper[4838]: I1128 10:52:43.134285 4838 generic.go:334] "Generic (PLEG): container finished" podID="3ee39750-6a0d-4094-a8a3-46fe0adca89b" containerID="fc769ce7d3ce2a3c24558a35eaf0fb4fe219535d9899d70519dee6cff9aa4b33" exitCode=137 Nov 28 10:52:43 crc kubenswrapper[4838]: I1128 10:52:43.134330 4838 generic.go:334] "Generic (PLEG): container finished" podID="3ee39750-6a0d-4094-a8a3-46fe0adca89b" 
containerID="2157d9c578fd79a339c867ff1d8dcb621b3e51a485643ac293e5f9a6a61e3c58" exitCode=137 Nov 28 10:52:43 crc kubenswrapper[4838]: I1128 10:52:43.134387 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7c95bbbd67-4qj7h" event={"ID":"3ee39750-6a0d-4094-a8a3-46fe0adca89b","Type":"ContainerDied","Data":"fc769ce7d3ce2a3c24558a35eaf0fb4fe219535d9899d70519dee6cff9aa4b33"} Nov 28 10:52:43 crc kubenswrapper[4838]: I1128 10:52:43.134420 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7c95bbbd67-4qj7h" event={"ID":"3ee39750-6a0d-4094-a8a3-46fe0adca89b","Type":"ContainerDied","Data":"2157d9c578fd79a339c867ff1d8dcb621b3e51a485643ac293e5f9a6a61e3c58"} Nov 28 10:52:43 crc kubenswrapper[4838]: I1128 10:52:43.136360 4838 generic.go:334] "Generic (PLEG): container finished" podID="ae733bf5-694c-4e7a-9ea1-34d75ed1fec6" containerID="1a6fdc945bb9857a53036ce5ce0b96af3192aa7bc06e48e023b8ae8ed9f02696" exitCode=0 Nov 28 10:52:43 crc kubenswrapper[4838]: I1128 10:52:43.136386 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gx67" event={"ID":"ae733bf5-694c-4e7a-9ea1-34d75ed1fec6","Type":"ContainerDied","Data":"1a6fdc945bb9857a53036ce5ce0b96af3192aa7bc06e48e023b8ae8ed9f02696"} Nov 28 10:52:44 crc kubenswrapper[4838]: I1128 10:52:44.147850 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gx67" event={"ID":"ae733bf5-694c-4e7a-9ea1-34d75ed1fec6","Type":"ContainerStarted","Data":"8c3c1250351f50db84e8a94645530d01aac7074e6b07934600d8b19c10bc3c5f"} Nov 28 10:52:44 crc kubenswrapper[4838]: I1128 10:52:44.185593 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6gx67" podStartSLOduration=2.39948191 podStartE2EDuration="5.18556309s" podCreationTimestamp="2025-11-28 10:52:39 +0000 UTC" firstStartedPulling="2025-11-28 10:52:41.108480572 +0000 UTC m=+3332.807454742" lastFinishedPulling="2025-11-28 10:52:43.894561752 +0000 UTC m=+3335.593535922" observedRunningTime="2025-11-28 10:52:44.166637915 +0000 UTC m=+3335.865612075" watchObservedRunningTime="2025-11-28 10:52:44.18556309 +0000 UTC m=+3335.884537260" Nov 28 10:52:44 crc kubenswrapper[4838]: I1128 10:52:44.286107 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7c95bbbd67-4qj7h" Nov 28 10:52:44 crc kubenswrapper[4838]: I1128 10:52:44.462895 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqg8v\" (UniqueName: \"kubernetes.io/projected/3ee39750-6a0d-4094-a8a3-46fe0adca89b-kube-api-access-mqg8v\") pod \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\" (UID: \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\") " Nov 28 10:52:44 crc kubenswrapper[4838]: I1128 10:52:44.463240 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3ee39750-6a0d-4094-a8a3-46fe0adca89b-horizon-secret-key\") pod \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\" (UID: \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\") " Nov 28 10:52:44 crc kubenswrapper[4838]: I1128 10:52:44.463446 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3ee39750-6a0d-4094-a8a3-46fe0adca89b-config-data\") pod \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\" (UID: \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\") " Nov 28 10:52:44 crc kubenswrapper[4838]: I1128 10:52:44.463601 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3ee39750-6a0d-4094-a8a3-46fe0adca89b-scripts\") pod \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\" (UID: \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\") " Nov 28 10:52:44 crc kubenswrapper[4838]: I1128 10:52:44.463698 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ee39750-6a0d-4094-a8a3-46fe0adca89b-logs\") pod \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\" (UID: \"3ee39750-6a0d-4094-a8a3-46fe0adca89b\") " Nov 28 10:52:44 crc kubenswrapper[4838]: I1128 10:52:44.464406 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ee39750-6a0d-4094-a8a3-46fe0adca89b-logs" (OuterVolumeSpecName: "logs") pod "3ee39750-6a0d-4094-a8a3-46fe0adca89b" (UID: "3ee39750-6a0d-4094-a8a3-46fe0adca89b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:52:44 crc kubenswrapper[4838]: I1128 10:52:44.470015 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ee39750-6a0d-4094-a8a3-46fe0adca89b-kube-api-access-mqg8v" (OuterVolumeSpecName: "kube-api-access-mqg8v") pod "3ee39750-6a0d-4094-a8a3-46fe0adca89b" (UID: "3ee39750-6a0d-4094-a8a3-46fe0adca89b"). InnerVolumeSpecName "kube-api-access-mqg8v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:52:44 crc kubenswrapper[4838]: I1128 10:52:44.478110 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ee39750-6a0d-4094-a8a3-46fe0adca89b-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "3ee39750-6a0d-4094-a8a3-46fe0adca89b" (UID: "3ee39750-6a0d-4094-a8a3-46fe0adca89b"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:52:44 crc kubenswrapper[4838]: I1128 10:52:44.489498 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ee39750-6a0d-4094-a8a3-46fe0adca89b-scripts" (OuterVolumeSpecName: "scripts") pod "3ee39750-6a0d-4094-a8a3-46fe0adca89b" (UID: "3ee39750-6a0d-4094-a8a3-46fe0adca89b"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:52:44 crc kubenswrapper[4838]: I1128 10:52:44.506965 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ee39750-6a0d-4094-a8a3-46fe0adca89b-config-data" (OuterVolumeSpecName: "config-data") pod "3ee39750-6a0d-4094-a8a3-46fe0adca89b" (UID: "3ee39750-6a0d-4094-a8a3-46fe0adca89b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:52:44 crc kubenswrapper[4838]: I1128 10:52:44.565933 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3ee39750-6a0d-4094-a8a3-46fe0adca89b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:44 crc kubenswrapper[4838]: I1128 10:52:44.565962 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3ee39750-6a0d-4094-a8a3-46fe0adca89b-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:44 crc kubenswrapper[4838]: I1128 10:52:44.565971 4838 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ee39750-6a0d-4094-a8a3-46fe0adca89b-logs\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:44 crc kubenswrapper[4838]: I1128 10:52:44.565981 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqg8v\" (UniqueName: \"kubernetes.io/projected/3ee39750-6a0d-4094-a8a3-46fe0adca89b-kube-api-access-mqg8v\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:44 crc kubenswrapper[4838]: I1128 10:52:44.565992 4838 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3ee39750-6a0d-4094-a8a3-46fe0adca89b-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:44 crc kubenswrapper[4838]: I1128 10:52:44.585093 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:44 crc kubenswrapper[4838]: I1128 10:52:44.586008 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-565ff4b848-b45gx" Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.160935 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7c95bbbd67-4qj7h" Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.162848 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7c95bbbd67-4qj7h" event={"ID":"3ee39750-6a0d-4094-a8a3-46fe0adca89b","Type":"ContainerDied","Data":"6818a6308eb11da60d655a942aa252e940270858f11f8f7d0bd52cdf58b6f580"} Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.162889 4838 scope.go:117] "RemoveContainer" containerID="fc769ce7d3ce2a3c24558a35eaf0fb4fe219535d9899d70519dee6cff9aa4b33" Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.167181 4838 generic.go:334] "Generic (PLEG): container finished" podID="12b8e3d4-b4f7-4179-83d3-a038f09e4682" containerID="08c16acb87cf0772907a56f04ecf22c5d6e58669df25dd13e2ff4ee02b36df62" exitCode=137 Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.167212 4838 generic.go:334] "Generic (PLEG): container finished" podID="12b8e3d4-b4f7-4179-83d3-a038f09e4682" containerID="9e844b64ce84e1d7f0621982ed710538c49fb1124e85b7ac63ba827a96c90e13" exitCode=137 Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.168247 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6dfdc6b877-4j92h" event={"ID":"12b8e3d4-b4f7-4179-83d3-a038f09e4682","Type":"ContainerDied","Data":"08c16acb87cf0772907a56f04ecf22c5d6e58669df25dd13e2ff4ee02b36df62"} Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.168509 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6dfdc6b877-4j92h" event={"ID":"12b8e3d4-b4f7-4179-83d3-a038f09e4682","Type":"ContainerDied","Data":"9e844b64ce84e1d7f0621982ed710538c49fb1124e85b7ac63ba827a96c90e13"} Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.208126 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7c95bbbd67-4qj7h"] Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.221594 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-7c95bbbd67-4qj7h"] Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.274979 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6dfdc6b877-4j92h" Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.349362 4838 scope.go:117] "RemoveContainer" containerID="2157d9c578fd79a339c867ff1d8dcb621b3e51a485643ac293e5f9a6a61e3c58" Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.381692 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/12b8e3d4-b4f7-4179-83d3-a038f09e4682-scripts\") pod \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\" (UID: \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\") " Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.381870 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/12b8e3d4-b4f7-4179-83d3-a038f09e4682-logs\") pod \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\" (UID: \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\") " Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.381915 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/12b8e3d4-b4f7-4179-83d3-a038f09e4682-config-data\") pod \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\" (UID: \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\") " Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.381991 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/12b8e3d4-b4f7-4179-83d3-a038f09e4682-horizon-secret-key\") pod \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\" (UID: \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\") " Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.382061 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hgvt5\" (UniqueName: \"kubernetes.io/projected/12b8e3d4-b4f7-4179-83d3-a038f09e4682-kube-api-access-hgvt5\") pod \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\" (UID: \"12b8e3d4-b4f7-4179-83d3-a038f09e4682\") " Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.382373 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12b8e3d4-b4f7-4179-83d3-a038f09e4682-logs" (OuterVolumeSpecName: "logs") pod "12b8e3d4-b4f7-4179-83d3-a038f09e4682" (UID: "12b8e3d4-b4f7-4179-83d3-a038f09e4682"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.382687 4838 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/12b8e3d4-b4f7-4179-83d3-a038f09e4682-logs\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.391859 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12b8e3d4-b4f7-4179-83d3-a038f09e4682-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "12b8e3d4-b4f7-4179-83d3-a038f09e4682" (UID: "12b8e3d4-b4f7-4179-83d3-a038f09e4682"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.403999 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12b8e3d4-b4f7-4179-83d3-a038f09e4682-kube-api-access-hgvt5" (OuterVolumeSpecName: "kube-api-access-hgvt5") pod "12b8e3d4-b4f7-4179-83d3-a038f09e4682" (UID: "12b8e3d4-b4f7-4179-83d3-a038f09e4682"). InnerVolumeSpecName "kube-api-access-hgvt5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.405699 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12b8e3d4-b4f7-4179-83d3-a038f09e4682-config-data" (OuterVolumeSpecName: "config-data") pod "12b8e3d4-b4f7-4179-83d3-a038f09e4682" (UID: "12b8e3d4-b4f7-4179-83d3-a038f09e4682"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.413782 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12b8e3d4-b4f7-4179-83d3-a038f09e4682-scripts" (OuterVolumeSpecName: "scripts") pod "12b8e3d4-b4f7-4179-83d3-a038f09e4682" (UID: "12b8e3d4-b4f7-4179-83d3-a038f09e4682"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.484534 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hgvt5\" (UniqueName: \"kubernetes.io/projected/12b8e3d4-b4f7-4179-83d3-a038f09e4682-kube-api-access-hgvt5\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.484883 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/12b8e3d4-b4f7-4179-83d3-a038f09e4682-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.484894 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/12b8e3d4-b4f7-4179-83d3-a038f09e4682-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:45 crc kubenswrapper[4838]: I1128 10:52:45.484905 4838 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/12b8e3d4-b4f7-4179-83d3-a038f09e4682-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:46 crc kubenswrapper[4838]: I1128 10:52:46.114479 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-77d65cd94d-8f62l" Nov 28 10:52:46 crc kubenswrapper[4838]: I1128 10:52:46.187131 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6dfdc6b877-4j92h" event={"ID":"12b8e3d4-b4f7-4179-83d3-a038f09e4682","Type":"ContainerDied","Data":"3aa30f439e13087727014b7effabb2e54e4acaf896187b24e0308d362a6a7a58"} Nov 28 10:52:46 crc kubenswrapper[4838]: I1128 10:52:46.187191 4838 scope.go:117] "RemoveContainer" containerID="08c16acb87cf0772907a56f04ecf22c5d6e58669df25dd13e2ff4ee02b36df62" Nov 28 10:52:46 crc kubenswrapper[4838]: I1128 10:52:46.187332 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6dfdc6b877-4j92h" Nov 28 10:52:46 crc kubenswrapper[4838]: I1128 10:52:46.204046 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-565ff4b848-b45gx"] Nov 28 10:52:46 crc kubenswrapper[4838]: I1128 10:52:46.204801 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-565ff4b848-b45gx" podUID="580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" containerName="horizon-log" containerID="cri-o://07b13478bd4bc4909ab58488a40ca59a947862e788cdd515fce2962940d87199" gracePeriod=30 Nov 28 10:52:46 crc kubenswrapper[4838]: I1128 10:52:46.205519 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-565ff4b848-b45gx" podUID="580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" containerName="horizon" containerID="cri-o://c46d2f5a288bc031deb50b4c4507cac1d9c85391a267a560fda8b5a38f93334f" gracePeriod=30 Nov 28 10:52:46 crc kubenswrapper[4838]: I1128 10:52:46.211831 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-565ff4b848-b45gx" podUID="580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.243:8443/dashboard/auth/login/?next=/dashboard/\": EOF" Nov 28 10:52:46 crc kubenswrapper[4838]: I1128 10:52:46.232909 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-565ff4b848-b45gx" podUID="580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.243:8443/dashboard/auth/login/?next=/dashboard/\": read tcp 10.217.0.2:51046->10.217.0.243:8443: read: connection reset by peer" Nov 28 10:52:46 crc kubenswrapper[4838]: I1128 10:52:46.252413 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6dfdc6b877-4j92h"] Nov 28 10:52:46 crc kubenswrapper[4838]: I1128 10:52:46.262384 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-6dfdc6b877-4j92h"] Nov 28 10:52:46 crc kubenswrapper[4838]: I1128 10:52:46.400788 4838 scope.go:117] "RemoveContainer" containerID="9e844b64ce84e1d7f0621982ed710538c49fb1124e85b7ac63ba827a96c90e13" Nov 28 10:52:46 crc kubenswrapper[4838]: I1128 10:52:46.579890 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12b8e3d4-b4f7-4179-83d3-a038f09e4682" path="/var/lib/kubelet/pods/12b8e3d4-b4f7-4179-83d3-a038f09e4682/volumes" Nov 28 10:52:46 crc kubenswrapper[4838]: I1128 10:52:46.580557 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ee39750-6a0d-4094-a8a3-46fe0adca89b" path="/var/lib/kubelet/pods/3ee39750-6a0d-4094-a8a3-46fe0adca89b/volumes" Nov 28 10:52:49 crc kubenswrapper[4838]: I1128 10:52:49.976588 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6gx67" Nov 28 10:52:49 crc kubenswrapper[4838]: I1128 10:52:49.977236 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6gx67" Nov 28 10:52:51 crc kubenswrapper[4838]: I1128 10:52:51.042558 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6gx67" podUID="ae733bf5-694c-4e7a-9ea1-34d75ed1fec6" containerName="registry-server" probeResult="failure" output=< Nov 28 10:52:51 crc kubenswrapper[4838]: timeout: failed to connect service ":50051" within 1s Nov 28 10:52:51 crc kubenswrapper[4838]: > Nov 28 10:52:51 crc kubenswrapper[4838]: I1128 10:52:51.278938 4838 
generic.go:334] "Generic (PLEG): container finished" podID="580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" containerID="c46d2f5a288bc031deb50b4c4507cac1d9c85391a267a560fda8b5a38f93334f" exitCode=0 Nov 28 10:52:51 crc kubenswrapper[4838]: I1128 10:52:51.279011 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-565ff4b848-b45gx" event={"ID":"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2","Type":"ContainerDied","Data":"c46d2f5a288bc031deb50b4c4507cac1d9c85391a267a560fda8b5a38f93334f"} Nov 28 10:52:52 crc kubenswrapper[4838]: I1128 10:52:52.081401 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-565ff4b848-b45gx" podUID="580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.243:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.243:8443: connect: connection refused" Nov 28 10:52:55 crc kubenswrapper[4838]: I1128 10:52:55.329511 4838 generic.go:334] "Generic (PLEG): container finished" podID="05628975-e8c2-42db-b5a0-dc9536eb3e75" containerID="ba9491d4066fe9a83cc6e20b2fe20c40497d9126f96986013abddd70ab38a5d9" exitCode=0 Nov 28 10:52:55 crc kubenswrapper[4838]: I1128 10:52:55.329637 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-rnkm7" event={"ID":"05628975-e8c2-42db-b5a0-dc9536eb3e75","Type":"ContainerDied","Data":"ba9491d4066fe9a83cc6e20b2fe20c40497d9126f96986013abddd70ab38a5d9"} Nov 28 10:52:56 crc kubenswrapper[4838]: I1128 10:52:56.563231 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:52:56 crc kubenswrapper[4838]: E1128 10:52:56.564131 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:52:56 crc kubenswrapper[4838]: I1128 10:52:56.904156 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-sync-rnkm7" Nov 28 10:52:56 crc kubenswrapper[4838]: I1128 10:52:56.946893 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/05628975-e8c2-42db-b5a0-dc9536eb3e75-job-config-data\") pod \"05628975-e8c2-42db-b5a0-dc9536eb3e75\" (UID: \"05628975-e8c2-42db-b5a0-dc9536eb3e75\") " Nov 28 10:52:56 crc kubenswrapper[4838]: I1128 10:52:56.947975 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05628975-e8c2-42db-b5a0-dc9536eb3e75-combined-ca-bundle\") pod \"05628975-e8c2-42db-b5a0-dc9536eb3e75\" (UID: \"05628975-e8c2-42db-b5a0-dc9536eb3e75\") " Nov 28 10:52:56 crc kubenswrapper[4838]: I1128 10:52:56.948301 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05628975-e8c2-42db-b5a0-dc9536eb3e75-config-data\") pod \"05628975-e8c2-42db-b5a0-dc9536eb3e75\" (UID: \"05628975-e8c2-42db-b5a0-dc9536eb3e75\") " Nov 28 10:52:56 crc kubenswrapper[4838]: I1128 10:52:56.948505 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7v6m7\" (UniqueName: \"kubernetes.io/projected/05628975-e8c2-42db-b5a0-dc9536eb3e75-kube-api-access-7v6m7\") pod \"05628975-e8c2-42db-b5a0-dc9536eb3e75\" (UID: \"05628975-e8c2-42db-b5a0-dc9536eb3e75\") " Nov 28 10:52:56 crc kubenswrapper[4838]: I1128 10:52:56.953677 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05628975-e8c2-42db-b5a0-dc9536eb3e75-job-config-data" (OuterVolumeSpecName: "job-config-data") pod "05628975-e8c2-42db-b5a0-dc9536eb3e75" (UID: "05628975-e8c2-42db-b5a0-dc9536eb3e75"). InnerVolumeSpecName "job-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:52:56 crc kubenswrapper[4838]: I1128 10:52:56.965897 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05628975-e8c2-42db-b5a0-dc9536eb3e75-kube-api-access-7v6m7" (OuterVolumeSpecName: "kube-api-access-7v6m7") pod "05628975-e8c2-42db-b5a0-dc9536eb3e75" (UID: "05628975-e8c2-42db-b5a0-dc9536eb3e75"). InnerVolumeSpecName "kube-api-access-7v6m7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:52:56 crc kubenswrapper[4838]: I1128 10:52:56.967745 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05628975-e8c2-42db-b5a0-dc9536eb3e75-config-data" (OuterVolumeSpecName: "config-data") pod "05628975-e8c2-42db-b5a0-dc9536eb3e75" (UID: "05628975-e8c2-42db-b5a0-dc9536eb3e75"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:52:56 crc kubenswrapper[4838]: I1128 10:52:56.994211 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05628975-e8c2-42db-b5a0-dc9536eb3e75-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "05628975-e8c2-42db-b5a0-dc9536eb3e75" (UID: "05628975-e8c2-42db-b5a0-dc9536eb3e75"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.051635 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05628975-e8c2-42db-b5a0-dc9536eb3e75-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.051671 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05628975-e8c2-42db-b5a0-dc9536eb3e75-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.051680 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7v6m7\" (UniqueName: \"kubernetes.io/projected/05628975-e8c2-42db-b5a0-dc9536eb3e75-kube-api-access-7v6m7\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.051691 4838 reconciler_common.go:293] "Volume detached for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/05628975-e8c2-42db-b5a0-dc9536eb3e75-job-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.356978 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-rnkm7" event={"ID":"05628975-e8c2-42db-b5a0-dc9536eb3e75","Type":"ContainerDied","Data":"5cb37bd9b1b6f02c2e02463ba113d246acf05fbe28044278b78b80cbe11e8683"} Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.357034 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5cb37bd9b1b6f02c2e02463ba113d246acf05fbe28044278b78b80cbe11e8683" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.357088 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-rnkm7" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.735309 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"] Nov 28 10:52:57 crc kubenswrapper[4838]: E1128 10:52:57.735794 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ee39750-6a0d-4094-a8a3-46fe0adca89b" containerName="horizon" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.735813 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ee39750-6a0d-4094-a8a3-46fe0adca89b" containerName="horizon" Nov 28 10:52:57 crc kubenswrapper[4838]: E1128 10:52:57.735844 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05628975-e8c2-42db-b5a0-dc9536eb3e75" containerName="manila-db-sync" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.735852 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="05628975-e8c2-42db-b5a0-dc9536eb3e75" containerName="manila-db-sync" Nov 28 10:52:57 crc kubenswrapper[4838]: E1128 10:52:57.735863 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ee39750-6a0d-4094-a8a3-46fe0adca89b" containerName="horizon-log" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.735871 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ee39750-6a0d-4094-a8a3-46fe0adca89b" containerName="horizon-log" Nov 28 10:52:57 crc kubenswrapper[4838]: E1128 10:52:57.735882 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12b8e3d4-b4f7-4179-83d3-a038f09e4682" containerName="horizon-log" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.735889 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="12b8e3d4-b4f7-4179-83d3-a038f09e4682" containerName="horizon-log" Nov 28 
10:52:57 crc kubenswrapper[4838]: E1128 10:52:57.735908 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12b8e3d4-b4f7-4179-83d3-a038f09e4682" containerName="horizon" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.735915 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="12b8e3d4-b4f7-4179-83d3-a038f09e4682" containerName="horizon" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.736146 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="05628975-e8c2-42db-b5a0-dc9536eb3e75" containerName="manila-db-sync" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.736162 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ee39750-6a0d-4094-a8a3-46fe0adca89b" containerName="horizon-log" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.736177 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="12b8e3d4-b4f7-4179-83d3-a038f09e4682" containerName="horizon" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.736186 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ee39750-6a0d-4094-a8a3-46fe0adca89b" containerName="horizon" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.736196 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="12b8e3d4-b4f7-4179-83d3-a038f09e4682" containerName="horizon-log" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.737679 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.741855 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.742131 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scripts" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.742272 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-c4j65" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.743067 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.757614 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.780131 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " pod="openstack/manila-scheduler-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.780280 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-scripts\") pod \"manila-scheduler-0\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " pod="openstack/manila-scheduler-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.780344 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " pod="openstack/manila-scheduler-0" Nov 28 10:52:57 crc 
kubenswrapper[4838]: I1128 10:52:57.780476 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tckk2\" (UniqueName: \"kubernetes.io/projected/ab6b3078-9a91-4592-b0f5-f279b773646a-kube-api-access-tckk2\") pod \"manila-scheduler-0\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " pod="openstack/manila-scheduler-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.780527 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-config-data\") pod \"manila-scheduler-0\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " pod="openstack/manila-scheduler-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.780647 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ab6b3078-9a91-4592-b0f5-f279b773646a-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " pod="openstack/manila-scheduler-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.811417 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-share-share1-0"] Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.813120 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.819905 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-share-share1-config-data" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.828912 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.846863 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-76b5fdb995-4hbv2"] Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.856998 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.884918 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.884974 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " pod="openstack/manila-scheduler-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.885046 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thzpj\" (UniqueName: \"kubernetes.io/projected/d645d530-c8e1-48c5-9c93-2bf0ef691514-kube-api-access-thzpj\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.885070 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.885087 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tckk2\" (UniqueName: \"kubernetes.io/projected/ab6b3078-9a91-4592-b0f5-f279b773646a-kube-api-access-tckk2\") pod \"manila-scheduler-0\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " pod="openstack/manila-scheduler-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.885117 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-config-data\") pod \"manila-scheduler-0\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " pod="openstack/manila-scheduler-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.885194 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ab6b3078-9a91-4592-b0f5-f279b773646a-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " pod="openstack/manila-scheduler-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.885214 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-config-data\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.885235 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/d645d530-c8e1-48c5-9c93-2bf0ef691514-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 
10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.885295 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " pod="openstack/manila-scheduler-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.885322 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d645d530-c8e1-48c5-9c93-2bf0ef691514-ceph\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.885353 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d645d530-c8e1-48c5-9c93-2bf0ef691514-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.885383 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-scripts\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.885409 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-scripts\") pod \"manila-scheduler-0\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " pod="openstack/manila-scheduler-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.887231 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ab6b3078-9a91-4592-b0f5-f279b773646a-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " pod="openstack/manila-scheduler-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.883882 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76b5fdb995-4hbv2"] Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.890076 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-scripts\") pod \"manila-scheduler-0\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " pod="openstack/manila-scheduler-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.894602 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " pod="openstack/manila-scheduler-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.895315 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " pod="openstack/manila-scheduler-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.895431 4838 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-config-data\") pod \"manila-scheduler-0\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " pod="openstack/manila-scheduler-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.917191 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tckk2\" (UniqueName: \"kubernetes.io/projected/ab6b3078-9a91-4592-b0f5-f279b773646a-kube-api-access-tckk2\") pod \"manila-scheduler-0\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " pod="openstack/manila-scheduler-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.986709 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-config-data\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.987231 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/d645d530-c8e1-48c5-9c93-2bf0ef691514-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.987264 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05ad03f2-cfab-4825-9740-5c405550e376-config\") pod \"dnsmasq-dns-76b5fdb995-4hbv2\" (UID: \"05ad03f2-cfab-4825-9740-5c405550e376\") " pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.987303 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/05ad03f2-cfab-4825-9740-5c405550e376-dns-svc\") pod \"dnsmasq-dns-76b5fdb995-4hbv2\" (UID: \"05ad03f2-cfab-4825-9740-5c405550e376\") " pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.987473 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/d645d530-c8e1-48c5-9c93-2bf0ef691514-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.987329 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/05ad03f2-cfab-4825-9740-5c405550e376-ovsdbserver-nb\") pod \"dnsmasq-dns-76b5fdb995-4hbv2\" (UID: \"05ad03f2-cfab-4825-9740-5c405550e376\") " pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.987571 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d645d530-c8e1-48c5-9c93-2bf0ef691514-ceph\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.987598 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/d645d530-c8e1-48c5-9c93-2bf0ef691514-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.987629 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-scripts\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.987648 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/05ad03f2-cfab-4825-9740-5c405550e376-ovsdbserver-sb\") pod \"dnsmasq-dns-76b5fdb995-4hbv2\" (UID: \"05ad03f2-cfab-4825-9740-5c405550e376\") " pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.987670 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89lm8\" (UniqueName: \"kubernetes.io/projected/05ad03f2-cfab-4825-9740-5c405550e376-kube-api-access-89lm8\") pod \"dnsmasq-dns-76b5fdb995-4hbv2\" (UID: \"05ad03f2-cfab-4825-9740-5c405550e376\") " pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.987696 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.987780 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thzpj\" (UniqueName: \"kubernetes.io/projected/d645d530-c8e1-48c5-9c93-2bf0ef691514-kube-api-access-thzpj\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.987822 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.988074 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d645d530-c8e1-48c5-9c93-2bf0ef691514-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.988383 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/05ad03f2-cfab-4825-9740-5c405550e376-openstack-edpm-ipam\") pod \"dnsmasq-dns-76b5fdb995-4hbv2\" (UID: \"05ad03f2-cfab-4825-9740-5c405550e376\") " pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.991363 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.991647 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-config-data\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.992015 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:57 crc kubenswrapper[4838]: I1128 10:52:57.992822 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-scripts\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.002170 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d645d530-c8e1-48c5-9c93-2bf0ef691514-ceph\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.012911 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thzpj\" (UniqueName: \"kubernetes.io/projected/d645d530-c8e1-48c5-9c93-2bf0ef691514-kube-api-access-thzpj\") pod \"manila-share-share1-0\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " pod="openstack/manila-share-share1-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.071551 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-scheduler-0"
Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.090069 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05ad03f2-cfab-4825-9740-5c405550e376-config\") pod \"dnsmasq-dns-76b5fdb995-4hbv2\" (UID: \"05ad03f2-cfab-4825-9740-5c405550e376\") " pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2"
Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.090129 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/05ad03f2-cfab-4825-9740-5c405550e376-dns-svc\") pod \"dnsmasq-dns-76b5fdb995-4hbv2\" (UID: \"05ad03f2-cfab-4825-9740-5c405550e376\") " pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2"
Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.090153 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/05ad03f2-cfab-4825-9740-5c405550e376-ovsdbserver-nb\") pod \"dnsmasq-dns-76b5fdb995-4hbv2\" (UID: \"05ad03f2-cfab-4825-9740-5c405550e376\") " pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2"
Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.090191 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/05ad03f2-cfab-4825-9740-5c405550e376-ovsdbserver-sb\") pod \"dnsmasq-dns-76b5fdb995-4hbv2\" (UID: \"05ad03f2-cfab-4825-9740-5c405550e376\") " pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2"
Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.090209 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89lm8\" (UniqueName: \"kubernetes.io/projected/05ad03f2-cfab-4825-9740-5c405550e376-kube-api-access-89lm8\") pod \"dnsmasq-dns-76b5fdb995-4hbv2\" (UID: \"05ad03f2-cfab-4825-9740-5c405550e376\") " pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2"
Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.090282 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/05ad03f2-cfab-4825-9740-5c405550e376-openstack-edpm-ipam\") pod \"dnsmasq-dns-76b5fdb995-4hbv2\" (UID: \"05ad03f2-cfab-4825-9740-5c405550e376\") " pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2"
Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.091091 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/05ad03f2-cfab-4825-9740-5c405550e376-openstack-edpm-ipam\") pod \"dnsmasq-dns-76b5fdb995-4hbv2\" (UID: \"05ad03f2-cfab-4825-9740-5c405550e376\") " pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2"
Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.091581 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05ad03f2-cfab-4825-9740-5c405550e376-config\") pod \"dnsmasq-dns-76b5fdb995-4hbv2\" (UID: \"05ad03f2-cfab-4825-9740-5c405550e376\") " pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2"
Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.092066 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/05ad03f2-cfab-4825-9740-5c405550e376-dns-svc\") pod \"dnsmasq-dns-76b5fdb995-4hbv2\" (UID: \"05ad03f2-cfab-4825-9740-5c405550e376\") " pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2"
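[Annotation] The block above is the kubelet volume manager reconciling a newly admitted pod: for each declared volume, reconciler_common.go first confirms the controller attached it (VerifyControllerAttachedVolume), then begins mounting (MountVolume started), and operation_generator.go reports MountVolume.SetUp succeeded once the volume is materialized for the pod. A minimal Go sketch of that desired-state/actual-state loop follows; the types and function names are illustrative stand-ins, not the real kubelet API.

    package main

    import "fmt"

    // volume is a stand-in for an entry in the kubelet's desired state of
    // the world; the real object carries the full spec and volume plugin.
    type volume struct{ name, pod string }

    // reconcile mounts every desired volume not yet in the actual state,
    // mirroring the VerifyControllerAttachedVolume -> MountVolume ->
    // MountVolume.SetUp progression visible in the log above.
    func reconcile(desired []volume, mounted map[string]bool) {
    	for _, v := range desired {
    		key := v.pod + "/" + v.name
    		if mounted[key] {
    			continue // already in the actual state of the world
    		}
    		fmt.Printf("VerifyControllerAttachedVolume started for volume %q pod %q\n", v.name, v.pod)
    		fmt.Printf("MountVolume started for volume %q\n", v.name)
    		mounted[key] = true // the plugin's SetUp() would run here
    		fmt.Printf("MountVolume.SetUp succeeded for volume %q\n", v.name)
    	}
    }

    func main() {
    	desired := []volume{
    		{"config", "dnsmasq-dns-76b5fdb995-4hbv2"},
    		{"dns-svc", "dnsmasq-dns-76b5fdb995-4hbv2"},
    	}
    	reconcile(desired, map[string]bool{})
    }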
Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.092560 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/05ad03f2-cfab-4825-9740-5c405550e376-ovsdbserver-nb\") pod \"dnsmasq-dns-76b5fdb995-4hbv2\" (UID: \"05ad03f2-cfab-4825-9740-5c405550e376\") " pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2"
Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.093074 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/05ad03f2-cfab-4825-9740-5c405550e376-ovsdbserver-sb\") pod \"dnsmasq-dns-76b5fdb995-4hbv2\" (UID: \"05ad03f2-cfab-4825-9740-5c405550e376\") " pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2"
Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.096076 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"]
Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.097746 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0"
Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.103030 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data"
Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.103171 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"]
Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.118763 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89lm8\" (UniqueName: \"kubernetes.io/projected/05ad03f2-cfab-4825-9740-5c405550e376-kube-api-access-89lm8\") pod \"dnsmasq-dns-76b5fdb995-4hbv2\" (UID: \"05ad03f2-cfab-4825-9740-5c405550e376\") " pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2"
Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.142616 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0"
Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.177452 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.192248 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37712620-36f3-4290-a269-f8e62bf725a6-logs\") pod \"manila-api-0\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " pod="openstack/manila-api-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.192290 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-config-data-custom\") pod \"manila-api-0\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " pod="openstack/manila-api-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.192328 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-config-data\") pod \"manila-api-0\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " pod="openstack/manila-api-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.192351 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-scripts\") pod \"manila-api-0\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " pod="openstack/manila-api-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.192367 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/37712620-36f3-4290-a269-f8e62bf725a6-etc-machine-id\") pod \"manila-api-0\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " pod="openstack/manila-api-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.192402 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " pod="openstack/manila-api-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.192434 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jr5j9\" (UniqueName: \"kubernetes.io/projected/37712620-36f3-4290-a269-f8e62bf725a6-kube-api-access-jr5j9\") pod \"manila-api-0\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " pod="openstack/manila-api-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.293915 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37712620-36f3-4290-a269-f8e62bf725a6-logs\") pod \"manila-api-0\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " pod="openstack/manila-api-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.294146 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-config-data-custom\") pod \"manila-api-0\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " pod="openstack/manila-api-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.294178 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-config-data\") pod \"manila-api-0\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " pod="openstack/manila-api-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.294204 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-scripts\") pod \"manila-api-0\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " pod="openstack/manila-api-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.294220 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/37712620-36f3-4290-a269-f8e62bf725a6-etc-machine-id\") pod \"manila-api-0\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " pod="openstack/manila-api-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.294255 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " pod="openstack/manila-api-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.294277 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jr5j9\" (UniqueName: \"kubernetes.io/projected/37712620-36f3-4290-a269-f8e62bf725a6-kube-api-access-jr5j9\") pod \"manila-api-0\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " pod="openstack/manila-api-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.294354 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37712620-36f3-4290-a269-f8e62bf725a6-logs\") pod \"manila-api-0\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " pod="openstack/manila-api-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.294605 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/37712620-36f3-4290-a269-f8e62bf725a6-etc-machine-id\") pod \"manila-api-0\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " pod="openstack/manila-api-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.300308 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-scripts\") pod \"manila-api-0\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " pod="openstack/manila-api-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.303827 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " pod="openstack/manila-api-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.305409 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-config-data\") pod \"manila-api-0\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " pod="openstack/manila-api-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.306198 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-config-data-custom\") pod \"manila-api-0\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " pod="openstack/manila-api-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.324931 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jr5j9\" (UniqueName: \"kubernetes.io/projected/37712620-36f3-4290-a269-f8e62bf725a6-kube-api-access-jr5j9\") pod \"manila-api-0\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " pod="openstack/manila-api-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.461858 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.641887 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.708384 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 28 10:52:58 crc kubenswrapper[4838]: I1128 10:52:58.739364 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76b5fdb995-4hbv2"] Nov 28 10:52:58 crc kubenswrapper[4838]: W1128 10:52:58.750945 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod05ad03f2_cfab_4825_9740_5c405550e376.slice/crio-e6da71bf4243aeec7e52f08a259c1dd90cae295b45917ebdabc956a903ec432d WatchSource:0}: Error finding container e6da71bf4243aeec7e52f08a259c1dd90cae295b45917ebdabc956a903ec432d: Status 404 returned error can't find the container with id e6da71bf4243aeec7e52f08a259c1dd90cae295b45917ebdabc956a903ec432d Nov 28 10:52:59 crc kubenswrapper[4838]: I1128 10:52:59.051288 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 28 10:52:59 crc kubenswrapper[4838]: I1128 10:52:59.440204 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"ab6b3078-9a91-4592-b0f5-f279b773646a","Type":"ContainerStarted","Data":"1e1d00299522f53136b6ea5f9f4161406d07342f0f96e5c3c2177a49f2871d30"} Nov 28 10:52:59 crc kubenswrapper[4838]: I1128 10:52:59.451860 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"d645d530-c8e1-48c5-9c93-2bf0ef691514","Type":"ContainerStarted","Data":"6d4e0f5b068987f1f9927e46485c9e21712f9ac86d5d26022915224512abd53a"} Nov 28 10:52:59 crc kubenswrapper[4838]: I1128 10:52:59.458010 4838 generic.go:334] "Generic (PLEG): container finished" podID="05ad03f2-cfab-4825-9740-5c405550e376" containerID="c33610425ea1dbc7c1a75aead25b166164b12ebcdfb2021592571ef016866972" exitCode=0 Nov 28 10:52:59 crc kubenswrapper[4838]: I1128 10:52:59.458102 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2" event={"ID":"05ad03f2-cfab-4825-9740-5c405550e376","Type":"ContainerDied","Data":"c33610425ea1dbc7c1a75aead25b166164b12ebcdfb2021592571ef016866972"} Nov 28 10:52:59 crc kubenswrapper[4838]: I1128 10:52:59.458135 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2" event={"ID":"05ad03f2-cfab-4825-9740-5c405550e376","Type":"ContainerStarted","Data":"e6da71bf4243aeec7e52f08a259c1dd90cae295b45917ebdabc956a903ec432d"} Nov 28 10:52:59 crc kubenswrapper[4838]: I1128 10:52:59.461954 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" 
event={"ID":"37712620-36f3-4290-a269-f8e62bf725a6","Type":"ContainerStarted","Data":"c88dac80fb81a4881e6266e89bf193f4ab07a254662d156c8745b46093198799"} Nov 28 10:53:00 crc kubenswrapper[4838]: I1128 10:53:00.047951 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6gx67" Nov 28 10:53:00 crc kubenswrapper[4838]: I1128 10:53:00.118080 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6gx67" Nov 28 10:53:00 crc kubenswrapper[4838]: I1128 10:53:00.283469 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6gx67"] Nov 28 10:53:00 crc kubenswrapper[4838]: I1128 10:53:00.373576 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-api-0"] Nov 28 10:53:00 crc kubenswrapper[4838]: I1128 10:53:00.504852 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2" event={"ID":"05ad03f2-cfab-4825-9740-5c405550e376","Type":"ContainerStarted","Data":"2a2d1ba77188c9f2ba4e7c3d1c8031c9823b4b5e8e9d50117ffd60aec8480819"} Nov 28 10:53:00 crc kubenswrapper[4838]: I1128 10:53:00.505316 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2" Nov 28 10:53:00 crc kubenswrapper[4838]: I1128 10:53:00.521900 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"37712620-36f3-4290-a269-f8e62bf725a6","Type":"ContainerStarted","Data":"27c44033cec4ac1228636245ea7944f909f29babfa71b01cdef79c0dc4a8895e"} Nov 28 10:53:00 crc kubenswrapper[4838]: I1128 10:53:00.521945 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"37712620-36f3-4290-a269-f8e62bf725a6","Type":"ContainerStarted","Data":"695e24c651cc898126ae95b79f910af7c31d7039ea375494eca0496455f9e061"} Nov 28 10:53:00 crc kubenswrapper[4838]: I1128 10:53:00.521962 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 28 10:53:00 crc kubenswrapper[4838]: I1128 10:53:00.531703 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"ab6b3078-9a91-4592-b0f5-f279b773646a","Type":"ContainerStarted","Data":"02660319b311fd3fd97999dced12d9e4d3638b623efded2148a24a9dbfb649dc"} Nov 28 10:53:00 crc kubenswrapper[4838]: I1128 10:53:00.531756 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"ab6b3078-9a91-4592-b0f5-f279b773646a","Type":"ContainerStarted","Data":"3a1d7adf84a359bf3b0eff48483654dd2a6526292140a1755c8155d2ed4c7c6a"} Nov 28 10:53:00 crc kubenswrapper[4838]: I1128 10:53:00.549084 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2" podStartSLOduration=3.549066051 podStartE2EDuration="3.549066051s" podCreationTimestamp="2025-11-28 10:52:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:53:00.532198123 +0000 UTC m=+3352.231172293" watchObservedRunningTime="2025-11-28 10:53:00.549066051 +0000 UTC m=+3352.248040221" Nov 28 10:53:00 crc kubenswrapper[4838]: I1128 10:53:00.560928 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=2.900472995 podStartE2EDuration="3.560904813s" podCreationTimestamp="2025-11-28 
10:52:57 +0000 UTC" firstStartedPulling="2025-11-28 10:52:58.645892453 +0000 UTC m=+3350.344866623" lastFinishedPulling="2025-11-28 10:52:59.306324271 +0000 UTC m=+3351.005298441" observedRunningTime="2025-11-28 10:53:00.549418191 +0000 UTC m=+3352.248392361" watchObservedRunningTime="2025-11-28 10:53:00.560904813 +0000 UTC m=+3352.259878983" Nov 28 10:53:00 crc kubenswrapper[4838]: I1128 10:53:00.581399 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=2.581381169 podStartE2EDuration="2.581381169s" podCreationTimestamp="2025-11-28 10:52:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:53:00.569757224 +0000 UTC m=+3352.268731394" watchObservedRunningTime="2025-11-28 10:53:00.581381169 +0000 UTC m=+3352.280355339" Nov 28 10:53:01 crc kubenswrapper[4838]: I1128 10:53:01.546187 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6gx67" podUID="ae733bf5-694c-4e7a-9ea1-34d75ed1fec6" containerName="registry-server" containerID="cri-o://8c3c1250351f50db84e8a94645530d01aac7074e6b07934600d8b19c10bc3c5f" gracePeriod=2 Nov 28 10:53:01 crc kubenswrapper[4838]: I1128 10:53:01.548023 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="37712620-36f3-4290-a269-f8e62bf725a6" containerName="manila-api-log" containerID="cri-o://695e24c651cc898126ae95b79f910af7c31d7039ea375494eca0496455f9e061" gracePeriod=30 Nov 28 10:53:01 crc kubenswrapper[4838]: I1128 10:53:01.548156 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="37712620-36f3-4290-a269-f8e62bf725a6" containerName="manila-api" containerID="cri-o://27c44033cec4ac1228636245ea7944f909f29babfa71b01cdef79c0dc4a8895e" gracePeriod=30 Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.082211 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-565ff4b848-b45gx" podUID="580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.243:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.243:8443: connect: connection refused" Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.088191 4838 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.088191 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6gx67"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.181892 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae733bf5-694c-4e7a-9ea1-34d75ed1fec6-utilities\") pod \"ae733bf5-694c-4e7a-9ea1-34d75ed1fec6\" (UID: \"ae733bf5-694c-4e7a-9ea1-34d75ed1fec6\") "
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.182166 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xbggl\" (UniqueName: \"kubernetes.io/projected/ae733bf5-694c-4e7a-9ea1-34d75ed1fec6-kube-api-access-xbggl\") pod \"ae733bf5-694c-4e7a-9ea1-34d75ed1fec6\" (UID: \"ae733bf5-694c-4e7a-9ea1-34d75ed1fec6\") "
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.182394 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae733bf5-694c-4e7a-9ea1-34d75ed1fec6-catalog-content\") pod \"ae733bf5-694c-4e7a-9ea1-34d75ed1fec6\" (UID: \"ae733bf5-694c-4e7a-9ea1-34d75ed1fec6\") "
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.182981 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae733bf5-694c-4e7a-9ea1-34d75ed1fec6-utilities" (OuterVolumeSpecName: "utilities") pod "ae733bf5-694c-4e7a-9ea1-34d75ed1fec6" (UID: "ae733bf5-694c-4e7a-9ea1-34d75ed1fec6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.186882 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae733bf5-694c-4e7a-9ea1-34d75ed1fec6-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.205187 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae733bf5-694c-4e7a-9ea1-34d75ed1fec6-kube-api-access-xbggl" (OuterVolumeSpecName: "kube-api-access-xbggl") pod "ae733bf5-694c-4e7a-9ea1-34d75ed1fec6" (UID: "ae733bf5-694c-4e7a-9ea1-34d75ed1fec6"). InnerVolumeSpecName "kube-api-access-xbggl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.255833 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-api-0" Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.288760 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-combined-ca-bundle\") pod \"37712620-36f3-4290-a269-f8e62bf725a6\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.288885 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/37712620-36f3-4290-a269-f8e62bf725a6-etc-machine-id\") pod \"37712620-36f3-4290-a269-f8e62bf725a6\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.288906 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-config-data\") pod \"37712620-36f3-4290-a269-f8e62bf725a6\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.288976 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/37712620-36f3-4290-a269-f8e62bf725a6-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "37712620-36f3-4290-a269-f8e62bf725a6" (UID: "37712620-36f3-4290-a269-f8e62bf725a6"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.289463 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jr5j9\" (UniqueName: \"kubernetes.io/projected/37712620-36f3-4290-a269-f8e62bf725a6-kube-api-access-jr5j9\") pod \"37712620-36f3-4290-a269-f8e62bf725a6\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.289509 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37712620-36f3-4290-a269-f8e62bf725a6-logs\") pod \"37712620-36f3-4290-a269-f8e62bf725a6\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.289582 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-scripts\") pod \"37712620-36f3-4290-a269-f8e62bf725a6\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.289634 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-config-data-custom\") pod \"37712620-36f3-4290-a269-f8e62bf725a6\" (UID: \"37712620-36f3-4290-a269-f8e62bf725a6\") " Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.290141 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xbggl\" (UniqueName: \"kubernetes.io/projected/ae733bf5-694c-4e7a-9ea1-34d75ed1fec6-kube-api-access-xbggl\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.290239 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37712620-36f3-4290-a269-f8e62bf725a6-logs" (OuterVolumeSpecName: "logs") pod "37712620-36f3-4290-a269-f8e62bf725a6" (UID: 
"37712620-36f3-4290-a269-f8e62bf725a6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.293664 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-scripts" (OuterVolumeSpecName: "scripts") pod "37712620-36f3-4290-a269-f8e62bf725a6" (UID: "37712620-36f3-4290-a269-f8e62bf725a6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.294357 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "37712620-36f3-4290-a269-f8e62bf725a6" (UID: "37712620-36f3-4290-a269-f8e62bf725a6"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.307573 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae733bf5-694c-4e7a-9ea1-34d75ed1fec6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ae733bf5-694c-4e7a-9ea1-34d75ed1fec6" (UID: "ae733bf5-694c-4e7a-9ea1-34d75ed1fec6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.310570 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37712620-36f3-4290-a269-f8e62bf725a6-kube-api-access-jr5j9" (OuterVolumeSpecName: "kube-api-access-jr5j9") pod "37712620-36f3-4290-a269-f8e62bf725a6" (UID: "37712620-36f3-4290-a269-f8e62bf725a6"). InnerVolumeSpecName "kube-api-access-jr5j9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.334031 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "37712620-36f3-4290-a269-f8e62bf725a6" (UID: "37712620-36f3-4290-a269-f8e62bf725a6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.368126 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-config-data" (OuterVolumeSpecName: "config-data") pod "37712620-36f3-4290-a269-f8e62bf725a6" (UID: "37712620-36f3-4290-a269-f8e62bf725a6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.391746 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jr5j9\" (UniqueName: \"kubernetes.io/projected/37712620-36f3-4290-a269-f8e62bf725a6-kube-api-access-jr5j9\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.391814 4838 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37712620-36f3-4290-a269-f8e62bf725a6-logs\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.391826 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.391837 4838 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.391845 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae733bf5-694c-4e7a-9ea1-34d75ed1fec6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.391853 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.391864 4838 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/37712620-36f3-4290-a269-f8e62bf725a6-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.391874 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37712620-36f3-4290-a269-f8e62bf725a6-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.565884 4838 generic.go:334] "Generic (PLEG): container finished" podID="ae733bf5-694c-4e7a-9ea1-34d75ed1fec6" containerID="8c3c1250351f50db84e8a94645530d01aac7074e6b07934600d8b19c10bc3c5f" exitCode=0 Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.566009 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6gx67" Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.570814 4838 generic.go:334] "Generic (PLEG): container finished" podID="37712620-36f3-4290-a269-f8e62bf725a6" containerID="27c44033cec4ac1228636245ea7944f909f29babfa71b01cdef79c0dc4a8895e" exitCode=0 Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.570856 4838 generic.go:334] "Generic (PLEG): container finished" podID="37712620-36f3-4290-a269-f8e62bf725a6" containerID="695e24c651cc898126ae95b79f910af7c31d7039ea375494eca0496455f9e061" exitCode=143 Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.570885 4838 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.570885 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.581735 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gx67" event={"ID":"ae733bf5-694c-4e7a-9ea1-34d75ed1fec6","Type":"ContainerDied","Data":"8c3c1250351f50db84e8a94645530d01aac7074e6b07934600d8b19c10bc3c5f"}
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.581775 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6gx67" event={"ID":"ae733bf5-694c-4e7a-9ea1-34d75ed1fec6","Type":"ContainerDied","Data":"fcbe26671a44097643303f999902cc35535e541c883dcc876aa894b68f4c6d73"}
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.581787 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"37712620-36f3-4290-a269-f8e62bf725a6","Type":"ContainerDied","Data":"27c44033cec4ac1228636245ea7944f909f29babfa71b01cdef79c0dc4a8895e"}
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.581797 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"37712620-36f3-4290-a269-f8e62bf725a6","Type":"ContainerDied","Data":"695e24c651cc898126ae95b79f910af7c31d7039ea375494eca0496455f9e061"}
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.581808 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"37712620-36f3-4290-a269-f8e62bf725a6","Type":"ContainerDied","Data":"c88dac80fb81a4881e6266e89bf193f4ab07a254662d156c8745b46093198799"}
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.581825 4838 scope.go:117] "RemoveContainer" containerID="8c3c1250351f50db84e8a94645530d01aac7074e6b07934600d8b19c10bc3c5f"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.611316 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-api-0"]
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.619304 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-api-0"]
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.642483 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6gx67"]
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.643882 4838 scope.go:117] "RemoveContainer" containerID="1a6fdc945bb9857a53036ce5ce0b96af3192aa7bc06e48e023b8ae8ed9f02696"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.651155 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"]
Nov 28 10:53:02 crc kubenswrapper[4838]: E1128 10:53:02.651556 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae733bf5-694c-4e7a-9ea1-34d75ed1fec6" containerName="extract-content"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.651574 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae733bf5-694c-4e7a-9ea1-34d75ed1fec6" containerName="extract-content"
Nov 28 10:53:02 crc kubenswrapper[4838]: E1128 10:53:02.651602 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37712620-36f3-4290-a269-f8e62bf725a6" containerName="manila-api"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.651611 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="37712620-36f3-4290-a269-f8e62bf725a6" containerName="manila-api"
Nov 28 10:53:02 crc kubenswrapper[4838]: E1128 10:53:02.651627 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae733bf5-694c-4e7a-9ea1-34d75ed1fec6" containerName="extract-utilities"
Nov 28 10:53:02 
crc kubenswrapper[4838]: I1128 10:53:02.651634 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae733bf5-694c-4e7a-9ea1-34d75ed1fec6" containerName="extract-utilities"
Nov 28 10:53:02 crc kubenswrapper[4838]: E1128 10:53:02.651652 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae733bf5-694c-4e7a-9ea1-34d75ed1fec6" containerName="registry-server"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.651659 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae733bf5-694c-4e7a-9ea1-34d75ed1fec6" containerName="registry-server"
Nov 28 10:53:02 crc kubenswrapper[4838]: E1128 10:53:02.651677 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37712620-36f3-4290-a269-f8e62bf725a6" containerName="manila-api-log"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.651682 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="37712620-36f3-4290-a269-f8e62bf725a6" containerName="manila-api-log"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.651878 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae733bf5-694c-4e7a-9ea1-34d75ed1fec6" containerName="registry-server"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.651888 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="37712620-36f3-4290-a269-f8e62bf725a6" containerName="manila-api"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.651908 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="37712620-36f3-4290-a269-f8e62bf725a6" containerName="manila-api-log"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.669104 4838 scope.go:117] "RemoveContainer" containerID="749cac8e221bff3977c38a3e35012fa1b8ebda61e0c10cda4e15c37773748334"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.685240 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6gx67"]
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.685343 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.687139 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.687340 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-manila-public-svc"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.687514 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-manila-internal-svc"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.754705 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"]
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.772420 4838 scope.go:117] "RemoveContainer" containerID="8c3c1250351f50db84e8a94645530d01aac7074e6b07934600d8b19c10bc3c5f"
Nov 28 10:53:02 crc kubenswrapper[4838]: E1128 10:53:02.775771 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c3c1250351f50db84e8a94645530d01aac7074e6b07934600d8b19c10bc3c5f\": container with ID starting with 8c3c1250351f50db84e8a94645530d01aac7074e6b07934600d8b19c10bc3c5f not found: ID does not exist" containerID="8c3c1250351f50db84e8a94645530d01aac7074e6b07934600d8b19c10bc3c5f"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.775837 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c3c1250351f50db84e8a94645530d01aac7074e6b07934600d8b19c10bc3c5f"} err="failed to get container status \"8c3c1250351f50db84e8a94645530d01aac7074e6b07934600d8b19c10bc3c5f\": rpc error: code = NotFound desc = could not find container \"8c3c1250351f50db84e8a94645530d01aac7074e6b07934600d8b19c10bc3c5f\": container with ID starting with 8c3c1250351f50db84e8a94645530d01aac7074e6b07934600d8b19c10bc3c5f not found: ID does not exist"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.775866 4838 scope.go:117] "RemoveContainer" containerID="1a6fdc945bb9857a53036ce5ce0b96af3192aa7bc06e48e023b8ae8ed9f02696"
Nov 28 10:53:02 crc kubenswrapper[4838]: E1128 10:53:02.781378 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a6fdc945bb9857a53036ce5ce0b96af3192aa7bc06e48e023b8ae8ed9f02696\": container with ID starting with 1a6fdc945bb9857a53036ce5ce0b96af3192aa7bc06e48e023b8ae8ed9f02696 not found: ID does not exist" containerID="1a6fdc945bb9857a53036ce5ce0b96af3192aa7bc06e48e023b8ae8ed9f02696"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.781415 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a6fdc945bb9857a53036ce5ce0b96af3192aa7bc06e48e023b8ae8ed9f02696"} err="failed to get container status \"1a6fdc945bb9857a53036ce5ce0b96af3192aa7bc06e48e023b8ae8ed9f02696\": rpc error: code = NotFound desc = could not find container \"1a6fdc945bb9857a53036ce5ce0b96af3192aa7bc06e48e023b8ae8ed9f02696\": container with ID starting with 1a6fdc945bb9857a53036ce5ce0b96af3192aa7bc06e48e023b8ae8ed9f02696 not found: ID does not exist"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.781435 4838 scope.go:117] "RemoveContainer" containerID="749cac8e221bff3977c38a3e35012fa1b8ebda61e0c10cda4e15c37773748334"
Nov 28 10:53:02 crc kubenswrapper[4838]: E1128 10:53:02.781803 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"749cac8e221bff3977c38a3e35012fa1b8ebda61e0c10cda4e15c37773748334\": container with ID starting with 749cac8e221bff3977c38a3e35012fa1b8ebda61e0c10cda4e15c37773748334 not found: ID does not exist" containerID="749cac8e221bff3977c38a3e35012fa1b8ebda61e0c10cda4e15c37773748334"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.781826 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"749cac8e221bff3977c38a3e35012fa1b8ebda61e0c10cda4e15c37773748334"} err="failed to get container status \"749cac8e221bff3977c38a3e35012fa1b8ebda61e0c10cda4e15c37773748334\": rpc error: code = NotFound desc = could not find container \"749cac8e221bff3977c38a3e35012fa1b8ebda61e0c10cda4e15c37773748334\": container with ID starting with 749cac8e221bff3977c38a3e35012fa1b8ebda61e0c10cda4e15c37773748334 not found: ID does not exist"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.781839 4838 scope.go:117] "RemoveContainer" containerID="27c44033cec4ac1228636245ea7944f909f29babfa71b01cdef79c0dc4a8895e"
Nov 28 10:53:02 crc kubenswrapper[4838]: E1128 10:53:02.797370 4838 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37712620_36f3_4290_a269_f8e62bf725a6.slice/crio-c88dac80fb81a4881e6266e89bf193f4ab07a254662d156c8745b46093198799\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podae733bf5_694c_4e7a_9ea1_34d75ed1fec6.slice/crio-fcbe26671a44097643303f999902cc35535e541c883dcc876aa894b68f4c6d73\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podae733bf5_694c_4e7a_9ea1_34d75ed1fec6.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37712620_36f3_4290_a269_f8e62bf725a6.slice\": RecentStats: unable to find data in memory cache]"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.805958 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a16e601a-3619-49a9-82d1-67129c2e2413-etc-machine-id\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.806034 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a16e601a-3619-49a9-82d1-67129c2e2413-public-tls-certs\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.806071 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a16e601a-3619-49a9-82d1-67129c2e2413-internal-tls-certs\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.806101 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a16e601a-3619-49a9-82d1-67129c2e2413-scripts\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.806153 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a16e601a-3619-49a9-82d1-67129c2e2413-config-data\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.806306 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a16e601a-3619-49a9-82d1-67129c2e2413-config-data-custom\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.806385 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a16e601a-3619-49a9-82d1-67129c2e2413-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.806417 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frppw\" (UniqueName: \"kubernetes.io/projected/a16e601a-3619-49a9-82d1-67129c2e2413-kube-api-access-frppw\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.806504 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a16e601a-3619-49a9-82d1-67129c2e2413-logs\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.807743 4838 scope.go:117] "RemoveContainer" containerID="695e24c651cc898126ae95b79f910af7c31d7039ea375494eca0496455f9e061"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.832629 4838 scope.go:117] "RemoveContainer" containerID="27c44033cec4ac1228636245ea7944f909f29babfa71b01cdef79c0dc4a8895e"
Nov 28 10:53:02 crc kubenswrapper[4838]: E1128 10:53:02.833275 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"27c44033cec4ac1228636245ea7944f909f29babfa71b01cdef79c0dc4a8895e\": container with ID starting with 27c44033cec4ac1228636245ea7944f909f29babfa71b01cdef79c0dc4a8895e not found: ID does not exist" containerID="27c44033cec4ac1228636245ea7944f909f29babfa71b01cdef79c0dc4a8895e"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.833299 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"27c44033cec4ac1228636245ea7944f909f29babfa71b01cdef79c0dc4a8895e"} err="failed to get container status \"27c44033cec4ac1228636245ea7944f909f29babfa71b01cdef79c0dc4a8895e\": rpc error: code = NotFound desc = could not find container \"27c44033cec4ac1228636245ea7944f909f29babfa71b01cdef79c0dc4a8895e\": container with ID starting with 27c44033cec4ac1228636245ea7944f909f29babfa71b01cdef79c0dc4a8895e not found: ID does not exist"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.833317 4838 scope.go:117] "RemoveContainer" containerID="695e24c651cc898126ae95b79f910af7c31d7039ea375494eca0496455f9e061"
Nov 28 10:53:02 crc kubenswrapper[4838]: E1128 10:53:02.835559 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"695e24c651cc898126ae95b79f910af7c31d7039ea375494eca0496455f9e061\": container with ID starting with 695e24c651cc898126ae95b79f910af7c31d7039ea375494eca0496455f9e061 not found: ID does not exist" containerID="695e24c651cc898126ae95b79f910af7c31d7039ea375494eca0496455f9e061"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.835589 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"695e24c651cc898126ae95b79f910af7c31d7039ea375494eca0496455f9e061"} err="failed to get container status \"695e24c651cc898126ae95b79f910af7c31d7039ea375494eca0496455f9e061\": rpc error: code = NotFound desc = could not find container \"695e24c651cc898126ae95b79f910af7c31d7039ea375494eca0496455f9e061\": container with ID starting with 695e24c651cc898126ae95b79f910af7c31d7039ea375494eca0496455f9e061 not found: ID does not exist"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.835610 4838 scope.go:117] "RemoveContainer" containerID="27c44033cec4ac1228636245ea7944f909f29babfa71b01cdef79c0dc4a8895e"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.836106 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"27c44033cec4ac1228636245ea7944f909f29babfa71b01cdef79c0dc4a8895e"} err="failed to get container status \"27c44033cec4ac1228636245ea7944f909f29babfa71b01cdef79c0dc4a8895e\": rpc error: code = NotFound desc = could not find container \"27c44033cec4ac1228636245ea7944f909f29babfa71b01cdef79c0dc4a8895e\": container with ID starting with 27c44033cec4ac1228636245ea7944f909f29babfa71b01cdef79c0dc4a8895e not found: ID does not exist"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.836240 4838 scope.go:117] "RemoveContainer" containerID="695e24c651cc898126ae95b79f910af7c31d7039ea375494eca0496455f9e061"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.836710 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"695e24c651cc898126ae95b79f910af7c31d7039ea375494eca0496455f9e061"} err="failed to get container status \"695e24c651cc898126ae95b79f910af7c31d7039ea375494eca0496455f9e061\": rpc error: code = NotFound desc = could not find container \"695e24c651cc898126ae95b79f910af7c31d7039ea375494eca0496455f9e061\": container with ID starting with 695e24c651cc898126ae95b79f910af7c31d7039ea375494eca0496455f9e061 not found: ID does not exist"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.908524 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a16e601a-3619-49a9-82d1-67129c2e2413-internal-tls-certs\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.908607 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a16e601a-3619-49a9-82d1-67129c2e2413-scripts\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.908687 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a16e601a-3619-49a9-82d1-67129c2e2413-config-data\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.908767 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a16e601a-3619-49a9-82d1-67129c2e2413-config-data-custom\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.908816 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a16e601a-3619-49a9-82d1-67129c2e2413-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.908845 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frppw\" (UniqueName: \"kubernetes.io/projected/a16e601a-3619-49a9-82d1-67129c2e2413-kube-api-access-frppw\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.908901 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a16e601a-3619-49a9-82d1-67129c2e2413-logs\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.908959 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a16e601a-3619-49a9-82d1-67129c2e2413-etc-machine-id\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.909020 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a16e601a-3619-49a9-82d1-67129c2e2413-public-tls-certs\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.909099 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a16e601a-3619-49a9-82d1-67129c2e2413-etc-machine-id\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.909344 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a16e601a-3619-49a9-82d1-67129c2e2413-logs\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.914479 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a16e601a-3619-49a9-82d1-67129c2e2413-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.915271 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a16e601a-3619-49a9-82d1-67129c2e2413-config-data-custom\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.916010 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a16e601a-3619-49a9-82d1-67129c2e2413-scripts\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.916887 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a16e601a-3619-49a9-82d1-67129c2e2413-internal-tls-certs\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.917674 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a16e601a-3619-49a9-82d1-67129c2e2413-config-data\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.928201 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a16e601a-3619-49a9-82d1-67129c2e2413-public-tls-certs\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:02 crc kubenswrapper[4838]: I1128 10:53:02.930151 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frppw\" (UniqueName: \"kubernetes.io/projected/a16e601a-3619-49a9-82d1-67129c2e2413-kube-api-access-frppw\") pod \"manila-api-0\" (UID: \"a16e601a-3619-49a9-82d1-67129c2e2413\") " pod="openstack/manila-api-0"
Nov 28 10:53:03 crc kubenswrapper[4838]: I1128 10:53:03.054763 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 10:53:03 crc kubenswrapper[4838]: I1128 10:53:03.055025 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c2e0f281-41b3-4f17-a174-41ae4ef2c53e" containerName="ceilometer-central-agent" containerID="cri-o://77edfdbed06620222b72edf06d1eb7713e03f7de01681ff4a3829524297f906e" gracePeriod=30
Nov 28 10:53:03 crc kubenswrapper[4838]: I1128 10:53:03.055108 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c2e0f281-41b3-4f17-a174-41ae4ef2c53e" containerName="proxy-httpd" containerID="cri-o://714aabd613ec3e8cc00d9bc3feaa2c0a645d14266b0871be587d939c62ef8fc6" gracePeriod=30
Nov 28 10:53:03 crc kubenswrapper[4838]: I1128 10:53:03.055137 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c2e0f281-41b3-4f17-a174-41ae4ef2c53e" containerName="sg-core" containerID="cri-o://544a52c665677008f1adc4145d8513585f2965df639fdc1b0e28b4b61bcdad5f" gracePeriod=30
Nov 28 10:53:03 crc kubenswrapper[4838]: I1128 10:53:03.055158 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c2e0f281-41b3-4f17-a174-41ae4ef2c53e" containerName="ceilometer-notification-agent" containerID="cri-o://d3ee886cd3bec35334f9ae02a3b24c96867dd9914979da62aaa9f71103066c0b" gracePeriod=30
Nov 28 10:53:03 crc kubenswrapper[4838]: I1128 10:53:03.103630 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0"
Nov 28 10:53:03 crc kubenswrapper[4838]: I1128 10:53:03.596812 4838 generic.go:334] "Generic (PLEG): container finished" podID="c2e0f281-41b3-4f17-a174-41ae4ef2c53e" containerID="714aabd613ec3e8cc00d9bc3feaa2c0a645d14266b0871be587d939c62ef8fc6" exitCode=0
Nov 28 10:53:03 crc kubenswrapper[4838]: I1128 10:53:03.597190 4838 generic.go:334] "Generic (PLEG): container finished" podID="c2e0f281-41b3-4f17-a174-41ae4ef2c53e" containerID="544a52c665677008f1adc4145d8513585f2965df639fdc1b0e28b4b61bcdad5f" exitCode=2
Nov 28 10:53:03 crc kubenswrapper[4838]: I1128 10:53:03.597203 4838 generic.go:334] "Generic (PLEG): container finished" podID="c2e0f281-41b3-4f17-a174-41ae4ef2c53e" containerID="77edfdbed06620222b72edf06d1eb7713e03f7de01681ff4a3829524297f906e" exitCode=0
Nov 28 10:53:03 crc kubenswrapper[4838]: I1128 10:53:03.597295 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c2e0f281-41b3-4f17-a174-41ae4ef2c53e","Type":"ContainerDied","Data":"714aabd613ec3e8cc00d9bc3feaa2c0a645d14266b0871be587d939c62ef8fc6"}
Nov 28 10:53:03 crc kubenswrapper[4838]: I1128 10:53:03.597340 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c2e0f281-41b3-4f17-a174-41ae4ef2c53e","Type":"ContainerDied","Data":"544a52c665677008f1adc4145d8513585f2965df639fdc1b0e28b4b61bcdad5f"}
Nov 28 10:53:03 crc kubenswrapper[4838]: I1128 10:53:03.597352 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c2e0f281-41b3-4f17-a174-41ae4ef2c53e","Type":"ContainerDied","Data":"77edfdbed06620222b72edf06d1eb7713e03f7de01681ff4a3829524297f906e"}
Nov 28 10:53:03 crc kubenswrapper[4838]: I1128 10:53:03.676965 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"]
Nov 28 10:53:04 crc kubenswrapper[4838]: I1128 10:53:04.575502 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37712620-36f3-4290-a269-f8e62bf725a6" path="/var/lib/kubelet/pods/37712620-36f3-4290-a269-f8e62bf725a6/volumes"
Nov 28 10:53:04 crc kubenswrapper[4838]: I1128 10:53:04.576739 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae733bf5-694c-4e7a-9ea1-34d75ed1fec6" path="/var/lib/kubelet/pods/ae733bf5-694c-4e7a-9ea1-34d75ed1fec6/volumes"
Nov 28 10:53:06 crc kubenswrapper[4838]: I1128 10:53:06.632702 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"d645d530-c8e1-48c5-9c93-2bf0ef691514","Type":"ContainerStarted","Data":"1d975085ad4f3ec99a878f8fa1ec1022a1135e14c310b0c83c68d6a9c422fbc8"}
Nov 28 10:53:06 crc kubenswrapper[4838]: I1128 10:53:06.634742 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"a16e601a-3619-49a9-82d1-67129c2e2413","Type":"ContainerStarted","Data":"d8d91312423e092cee14ed670cdc78a92aa09953adfde4d6fd7048d9e1245f9d"}
Nov 28 10:53:06 crc kubenswrapper[4838]: I1128 10:53:06.634804 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"a16e601a-3619-49a9-82d1-67129c2e2413","Type":"ContainerStarted","Data":"4c1baaa7bd51809e6a63a0590efd1e4ab1770b79cdd2c6ff84eeb457dd592250"}
Nov 28 10:53:07 crc kubenswrapper[4838]: I1128 10:53:07.650403 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"d645d530-c8e1-48c5-9c93-2bf0ef691514","Type":"ContainerStarted","Data":"67a7b3d6ebe1d6e5474e23ad22300ec84f8a144b1f94fa71bc684024a71ad351"}
Nov 28 10:53:07 crc kubenswrapper[4838]: I1128 10:53:07.653374 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"a16e601a-3619-49a9-82d1-67129c2e2413","Type":"ContainerStarted","Data":"4642117b4c1519a4742558fd8781685b01b16dadb17300be328b7b396349993a"}
Nov 28 10:53:07 crc kubenswrapper[4838]: I1128 10:53:07.654028 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0"
Nov 28 10:53:07 crc kubenswrapper[4838]: I1128 10:53:07.672674 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-share-share1-0" podStartSLOduration=3.442923577 podStartE2EDuration="10.672655051s" podCreationTimestamp="2025-11-28 10:52:57 +0000 UTC" firstStartedPulling="2025-11-28 10:52:58.716862332 +0000 UTC m=+3350.415836502" lastFinishedPulling="2025-11-28 10:53:05.946593806 +0000 UTC m=+3357.645567976" observedRunningTime="2025-11-28 10:53:07.664657614 +0000 UTC m=+3359.363631774" watchObservedRunningTime="2025-11-28 10:53:07.672655051 +0000 UTC m=+3359.371629221"
Nov 28 10:53:07 crc kubenswrapper[4838]: I1128 10:53:07.700933 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=5.700907599 podStartE2EDuration="5.700907599s" podCreationTimestamp="2025-11-28 10:53:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:53:07.684601307 +0000 UTC m=+3359.383575477" watchObservedRunningTime="2025-11-28 10:53:07.700907599 +0000 UTC m=+3359.399881769"
Nov 28 10:53:08 crc kubenswrapper[4838]: I1128 10:53:08.072673 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0"
Nov 28 10:53:08 crc kubenswrapper[4838]: I1128 10:53:08.144101 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0"
Nov 28 10:53:08 crc kubenswrapper[4838]: I1128 10:53:08.179848 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-76b5fdb995-4hbv2"
Nov 28 10:53:08 crc kubenswrapper[4838]: I1128 10:53:08.263354 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-864d5fc68c-z59nl"]
Nov 28 10:53:08 crc kubenswrapper[4838]: I1128 10:53:08.263647 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" podUID="cb064300-abd5-4c16-ab31-cf82ff261ac8" containerName="dnsmasq-dns" containerID="cri-o://96050f858f233fd7f06c24d049ac366b4bc0d4bec0dd8b61fec920eb960e234a" gracePeriod=10
Nov 28 10:53:08 crc kubenswrapper[4838]: I1128 10:53:08.673948 4838 generic.go:334] "Generic (PLEG): container finished" podID="cb064300-abd5-4c16-ab31-cf82ff261ac8" containerID="96050f858f233fd7f06c24d049ac366b4bc0d4bec0dd8b61fec920eb960e234a" exitCode=0
Nov 28 10:53:08 crc kubenswrapper[4838]: I1128 10:53:08.674029 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" event={"ID":"cb064300-abd5-4c16-ab31-cf82ff261ac8","Type":"ContainerDied","Data":"96050f858f233fd7f06c24d049ac366b4bc0d4bec0dd8b61fec920eb960e234a"}
Nov 28 10:53:08 crc kubenswrapper[4838]: I1128 10:53:08.762382 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-864d5fc68c-z59nl"
Nov 28 10:53:08 crc kubenswrapper[4838]: I1128 10:53:08.833493 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-ovsdbserver-nb\") pod \"cb064300-abd5-4c16-ab31-cf82ff261ac8\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") "
Nov 28 10:53:08 crc kubenswrapper[4838]: I1128 10:53:08.833549 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-ovsdbserver-sb\") pod \"cb064300-abd5-4c16-ab31-cf82ff261ac8\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") "
Nov 28 10:53:08 crc kubenswrapper[4838]: I1128 10:53:08.833568 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-dns-svc\") pod \"cb064300-abd5-4c16-ab31-cf82ff261ac8\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") "
Nov 28 10:53:08 crc kubenswrapper[4838]: I1128 10:53:08.833613 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-config\") pod \"cb064300-abd5-4c16-ab31-cf82ff261ac8\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") "
Nov 28 10:53:08 crc kubenswrapper[4838]: I1128 10:53:08.833812 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sn2p2\" (UniqueName: \"kubernetes.io/projected/cb064300-abd5-4c16-ab31-cf82ff261ac8-kube-api-access-sn2p2\") pod \"cb064300-abd5-4c16-ab31-cf82ff261ac8\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") "
Nov 28 10:53:08 crc kubenswrapper[4838]: I1128 10:53:08.833832 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-openstack-edpm-ipam\") pod \"cb064300-abd5-4c16-ab31-cf82ff261ac8\" (UID: \"cb064300-abd5-4c16-ab31-cf82ff261ac8\") "
Nov 28 10:53:08 crc kubenswrapper[4838]: I1128 10:53:08.864002 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb064300-abd5-4c16-ab31-cf82ff261ac8-kube-api-access-sn2p2" (OuterVolumeSpecName: "kube-api-access-sn2p2") pod "cb064300-abd5-4c16-ab31-cf82ff261ac8" (UID: "cb064300-abd5-4c16-ab31-cf82ff261ac8"). InnerVolumeSpecName "kube-api-access-sn2p2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:53:08 crc kubenswrapper[4838]: I1128 10:53:08.941910 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sn2p2\" (UniqueName: \"kubernetes.io/projected/cb064300-abd5-4c16-ab31-cf82ff261ac8-kube-api-access-sn2p2\") on node \"crc\" DevicePath \"\""
Nov 28 10:53:08 crc kubenswrapper[4838]: I1128 10:53:08.990457 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-config" (OuterVolumeSpecName: "config") pod "cb064300-abd5-4c16-ab31-cf82ff261ac8" (UID: "cb064300-abd5-4c16-ab31-cf82ff261ac8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 10:53:08 crc kubenswrapper[4838]: I1128 10:53:08.999452 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "cb064300-abd5-4c16-ab31-cf82ff261ac8" (UID: "cb064300-abd5-4c16-ab31-cf82ff261ac8"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 10:53:09 crc kubenswrapper[4838]: I1128 10:53:09.002586 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "cb064300-abd5-4c16-ab31-cf82ff261ac8" (UID: "cb064300-abd5-4c16-ab31-cf82ff261ac8"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 10:53:09 crc kubenswrapper[4838]: I1128 10:53:09.003413 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cb064300-abd5-4c16-ab31-cf82ff261ac8" (UID: "cb064300-abd5-4c16-ab31-cf82ff261ac8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 10:53:09 crc kubenswrapper[4838]: I1128 10:53:09.004161 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "cb064300-abd5-4c16-ab31-cf82ff261ac8" (UID: "cb064300-abd5-4c16-ab31-cf82ff261ac8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 10:53:09 crc kubenswrapper[4838]: I1128 10:53:09.043636 4838 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Nov 28 10:53:09 crc kubenswrapper[4838]: I1128 10:53:09.043663 4838 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 28 10:53:09 crc kubenswrapper[4838]: I1128 10:53:09.043673 4838 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 28 10:53:09 crc kubenswrapper[4838]: I1128 10:53:09.043684 4838 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 10:53:09 crc kubenswrapper[4838]: I1128 10:53:09.043692 4838 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb064300-abd5-4c16-ab31-cf82ff261ac8-config\") on node \"crc\" DevicePath \"\""
Nov 28 10:53:09 crc kubenswrapper[4838]: I1128 10:53:09.687252 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-864d5fc68c-z59nl"
Nov 28 10:53:09 crc kubenswrapper[4838]: I1128 10:53:09.687867 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-864d5fc68c-z59nl" event={"ID":"cb064300-abd5-4c16-ab31-cf82ff261ac8","Type":"ContainerDied","Data":"42c78ec8530fc50c1b56d2d15be290ca2b82f5a7e0408e19087fe663fdfa3c84"}
Nov 28 10:53:09 crc kubenswrapper[4838]: I1128 10:53:09.687927 4838 scope.go:117] "RemoveContainer" containerID="96050f858f233fd7f06c24d049ac366b4bc0d4bec0dd8b61fec920eb960e234a"
Nov 28 10:53:09 crc kubenswrapper[4838]: I1128 10:53:09.715016 4838 scope.go:117] "RemoveContainer" containerID="da5267b9bb23fd94888d48e99b1c570ed3913790b5209a85ad4cfafc3c2b287d"
Nov 28 10:53:09 crc kubenswrapper[4838]: I1128 10:53:09.750625 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-864d5fc68c-z59nl"]
Nov 28 10:53:09 crc kubenswrapper[4838]: I1128 10:53:09.764963 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-864d5fc68c-z59nl"]
Nov 28 10:53:10 crc kubenswrapper[4838]: I1128 10:53:10.580841 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb064300-abd5-4c16-ab31-cf82ff261ac8" path="/var/lib/kubelet/pods/cb064300-abd5-4c16-ab31-cf82ff261ac8/volumes"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.332846 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.389238 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-scripts\") pod \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") "
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.389351 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-sg-core-conf-yaml\") pod \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") "
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.389389 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-log-httpd\") pod \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") "
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.389420 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-config-data\") pod \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") "
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.389479 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-ceilometer-tls-certs\") pod \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") "
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.389523 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-run-httpd\") pod \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") "
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.389577 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-97wqq\" (UniqueName: \"kubernetes.io/projected/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-kube-api-access-97wqq\") pod \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") "
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.389748 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-combined-ca-bundle\") pod \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\" (UID: \"c2e0f281-41b3-4f17-a174-41ae4ef2c53e\") "
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.389869 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c2e0f281-41b3-4f17-a174-41ae4ef2c53e" (UID: "c2e0f281-41b3-4f17-a174-41ae4ef2c53e"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.389890 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c2e0f281-41b3-4f17-a174-41ae4ef2c53e" (UID: "c2e0f281-41b3-4f17-a174-41ae4ef2c53e"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.390310 4838 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.390334 4838 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.395854 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-kube-api-access-97wqq" (OuterVolumeSpecName: "kube-api-access-97wqq") pod "c2e0f281-41b3-4f17-a174-41ae4ef2c53e" (UID: "c2e0f281-41b3-4f17-a174-41ae4ef2c53e"). InnerVolumeSpecName "kube-api-access-97wqq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.409312 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-scripts" (OuterVolumeSpecName: "scripts") pod "c2e0f281-41b3-4f17-a174-41ae4ef2c53e" (UID: "c2e0f281-41b3-4f17-a174-41ae4ef2c53e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.423544 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c2e0f281-41b3-4f17-a174-41ae4ef2c53e" (UID: "c2e0f281-41b3-4f17-a174-41ae4ef2c53e"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.455616 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "c2e0f281-41b3-4f17-a174-41ae4ef2c53e" (UID: "c2e0f281-41b3-4f17-a174-41ae4ef2c53e"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.473742 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c2e0f281-41b3-4f17-a174-41ae4ef2c53e" (UID: "c2e0f281-41b3-4f17-a174-41ae4ef2c53e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.491888 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.491920 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.491931 4838 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.491941 4838 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.491949 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-97wqq\" (UniqueName: \"kubernetes.io/projected/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-kube-api-access-97wqq\") on node \"crc\" DevicePath \"\""
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.497003 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-config-data" (OuterVolumeSpecName: "config-data") pod "c2e0f281-41b3-4f17-a174-41ae4ef2c53e" (UID: "c2e0f281-41b3-4f17-a174-41ae4ef2c53e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.562212 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b"
Nov 28 10:53:11 crc kubenswrapper[4838]: E1128 10:53:11.562605 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.593015 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2e0f281-41b3-4f17-a174-41ae4ef2c53e-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.717130 4838 generic.go:334] "Generic (PLEG): container finished" podID="c2e0f281-41b3-4f17-a174-41ae4ef2c53e" containerID="d3ee886cd3bec35334f9ae02a3b24c96867dd9914979da62aaa9f71103066c0b" exitCode=0
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.717174 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c2e0f281-41b3-4f17-a174-41ae4ef2c53e","Type":"ContainerDied","Data":"d3ee886cd3bec35334f9ae02a3b24c96867dd9914979da62aaa9f71103066c0b"}
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.717208 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c2e0f281-41b3-4f17-a174-41ae4ef2c53e","Type":"ContainerDied","Data":"2a4b6677704a43ccabd6b2c733c318551ddd51e78452ce9485f099d3e9073a0f"}
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.717232 4838 scope.go:117] "RemoveContainer" containerID="714aabd613ec3e8cc00d9bc3feaa2c0a645d14266b0871be587d939c62ef8fc6"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.717289 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.759077 4838 scope.go:117] "RemoveContainer" containerID="544a52c665677008f1adc4145d8513585f2965df639fdc1b0e28b4b61bcdad5f"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.799302 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.816257 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.825786 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 28 10:53:11 crc kubenswrapper[4838]: E1128 10:53:11.826277 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2e0f281-41b3-4f17-a174-41ae4ef2c53e" containerName="sg-core"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.826294 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2e0f281-41b3-4f17-a174-41ae4ef2c53e" containerName="sg-core"
Nov 28 10:53:11 crc kubenswrapper[4838]: E1128 10:53:11.826305 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2e0f281-41b3-4f17-a174-41ae4ef2c53e" containerName="ceilometer-central-agent"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.826313 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2e0f281-41b3-4f17-a174-41ae4ef2c53e" containerName="ceilometer-central-agent"
Nov 28 10:53:11 crc kubenswrapper[4838]: E1128 10:53:11.826334 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2e0f281-41b3-4f17-a174-41ae4ef2c53e" containerName="ceilometer-notification-agent"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.826344 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2e0f281-41b3-4f17-a174-41ae4ef2c53e" containerName="ceilometer-notification-agent"
Nov 28 10:53:11 crc kubenswrapper[4838]: E1128 10:53:11.826357 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb064300-abd5-4c16-ab31-cf82ff261ac8" containerName="dnsmasq-dns"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.826364 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb064300-abd5-4c16-ab31-cf82ff261ac8" containerName="dnsmasq-dns"
Nov 28 10:53:11 crc kubenswrapper[4838]: E1128 10:53:11.826383 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2e0f281-41b3-4f17-a174-41ae4ef2c53e" containerName="proxy-httpd"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.826390 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2e0f281-41b3-4f17-a174-41ae4ef2c53e" containerName="proxy-httpd"
Nov 28 10:53:11 crc kubenswrapper[4838]: E1128 10:53:11.826411 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb064300-abd5-4c16-ab31-cf82ff261ac8" containerName="init"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.826419 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb064300-abd5-4c16-ab31-cf82ff261ac8" containerName="init"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.826658 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb064300-abd5-4c16-ab31-cf82ff261ac8" containerName="dnsmasq-dns"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.826678 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2e0f281-41b3-4f17-a174-41ae4ef2c53e" containerName="ceilometer-central-agent"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.826688 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2e0f281-41b3-4f17-a174-41ae4ef2c53e" containerName="ceilometer-notification-agent"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.826701 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2e0f281-41b3-4f17-a174-41ae4ef2c53e" containerName="proxy-httpd"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.826709 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2e0f281-41b3-4f17-a174-41ae4ef2c53e" containerName="sg-core"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.828813 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.834047 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.834245 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.834366 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.841164 4838 scope.go:117] "RemoveContainer" containerID="d3ee886cd3bec35334f9ae02a3b24c96867dd9914979da62aaa9f71103066c0b"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.866376 4838 scope.go:117] "RemoveContainer" containerID="77edfdbed06620222b72edf06d1eb7713e03f7de01681ff4a3829524297f906e"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.870356 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.883792 4838 scope.go:117] "RemoveContainer" containerID="714aabd613ec3e8cc00d9bc3feaa2c0a645d14266b0871be587d939c62ef8fc6"
Nov 28 10:53:11 crc kubenswrapper[4838]: E1128 10:53:11.884248 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"714aabd613ec3e8cc00d9bc3feaa2c0a645d14266b0871be587d939c62ef8fc6\": container with ID starting with 714aabd613ec3e8cc00d9bc3feaa2c0a645d14266b0871be587d939c62ef8fc6 not found: ID does not exist" containerID="714aabd613ec3e8cc00d9bc3feaa2c0a645d14266b0871be587d939c62ef8fc6"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.884294 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"714aabd613ec3e8cc00d9bc3feaa2c0a645d14266b0871be587d939c62ef8fc6"} err="failed to get container status \"714aabd613ec3e8cc00d9bc3feaa2c0a645d14266b0871be587d939c62ef8fc6\": rpc error: code = NotFound desc = could not find container \"714aabd613ec3e8cc00d9bc3feaa2c0a645d14266b0871be587d939c62ef8fc6\": container with ID starting with 714aabd613ec3e8cc00d9bc3feaa2c0a645d14266b0871be587d939c62ef8fc6 not found: ID does not exist"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.884351 4838 scope.go:117] "RemoveContainer" containerID="544a52c665677008f1adc4145d8513585f2965df639fdc1b0e28b4b61bcdad5f"
Nov 28 10:53:11 crc kubenswrapper[4838]: E1128 10:53:11.884652 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"544a52c665677008f1adc4145d8513585f2965df639fdc1b0e28b4b61bcdad5f\": container with ID starting with 544a52c665677008f1adc4145d8513585f2965df639fdc1b0e28b4b61bcdad5f not found: ID does not exist" containerID="544a52c665677008f1adc4145d8513585f2965df639fdc1b0e28b4b61bcdad5f"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.884675 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"544a52c665677008f1adc4145d8513585f2965df639fdc1b0e28b4b61bcdad5f"} err="failed to get container status \"544a52c665677008f1adc4145d8513585f2965df639fdc1b0e28b4b61bcdad5f\": rpc error: code = NotFound desc = could not find container \"544a52c665677008f1adc4145d8513585f2965df639fdc1b0e28b4b61bcdad5f\": container with ID starting with 544a52c665677008f1adc4145d8513585f2965df639fdc1b0e28b4b61bcdad5f not found: ID does not exist"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.884689 4838 scope.go:117] "RemoveContainer" containerID="d3ee886cd3bec35334f9ae02a3b24c96867dd9914979da62aaa9f71103066c0b"
Nov 28 10:53:11 crc kubenswrapper[4838]: E1128 10:53:11.884906 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3ee886cd3bec35334f9ae02a3b24c96867dd9914979da62aaa9f71103066c0b\": container with ID starting with d3ee886cd3bec35334f9ae02a3b24c96867dd9914979da62aaa9f71103066c0b not found: ID does not exist" containerID="d3ee886cd3bec35334f9ae02a3b24c96867dd9914979da62aaa9f71103066c0b"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.884932 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3ee886cd3bec35334f9ae02a3b24c96867dd9914979da62aaa9f71103066c0b"} err="failed to get container status \"d3ee886cd3bec35334f9ae02a3b24c96867dd9914979da62aaa9f71103066c0b\": rpc error: code = NotFound desc = could not find container \"d3ee886cd3bec35334f9ae02a3b24c96867dd9914979da62aaa9f71103066c0b\": container with ID starting with d3ee886cd3bec35334f9ae02a3b24c96867dd9914979da62aaa9f71103066c0b not found: ID does not exist"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.884952 4838 scope.go:117] "RemoveContainer" containerID="77edfdbed06620222b72edf06d1eb7713e03f7de01681ff4a3829524297f906e"
Nov 28 10:53:11 crc kubenswrapper[4838]: E1128 10:53:11.885209 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"77edfdbed06620222b72edf06d1eb7713e03f7de01681ff4a3829524297f906e\": container with ID starting with 77edfdbed06620222b72edf06d1eb7713e03f7de01681ff4a3829524297f906e not found: ID does not exist" containerID="77edfdbed06620222b72edf06d1eb7713e03f7de01681ff4a3829524297f906e"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.885240 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"77edfdbed06620222b72edf06d1eb7713e03f7de01681ff4a3829524297f906e"} err="failed to get container status \"77edfdbed06620222b72edf06d1eb7713e03f7de01681ff4a3829524297f906e\": rpc error: code = NotFound desc = could not find container \"77edfdbed06620222b72edf06d1eb7713e03f7de01681ff4a3829524297f906e\": container with ID starting with 77edfdbed06620222b72edf06d1eb7713e03f7de01681ff4a3829524297f906e not found: ID does not exist"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.901012 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afc12091-3d32-4b69-8e6c-29d521764b7c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.901070 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afc12091-3d32-4b69-8e6c-29d521764b7c-scripts\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.901133 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afc12091-3d32-4b69-8e6c-29d521764b7c-log-httpd\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.901155 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afc12091-3d32-4b69-8e6c-29d521764b7c-run-httpd\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.901260 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/afc12091-3d32-4b69-8e6c-29d521764b7c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.901289 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-znb8v\" (UniqueName: \"kubernetes.io/projected/afc12091-3d32-4b69-8e6c-29d521764b7c-kube-api-access-znb8v\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.901320 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/afc12091-3d32-4b69-8e6c-29d521764b7c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:11 crc kubenswrapper[4838]: I1128 10:53:11.901399 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afc12091-3d32-4b69-8e6c-29d521764b7c-config-data\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:12 crc kubenswrapper[4838]: I1128 10:53:12.003050 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afc12091-3d32-4b69-8e6c-29d521764b7c-scripts\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:12 crc kubenswrapper[4838]: I1128 10:53:12.003137 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afc12091-3d32-4b69-8e6c-29d521764b7c-log-httpd\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:12 crc kubenswrapper[4838]: I1128 10:53:12.003158 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afc12091-3d32-4b69-8e6c-29d521764b7c-run-httpd\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:12 crc kubenswrapper[4838]: I1128 10:53:12.003223 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/afc12091-3d32-4b69-8e6c-29d521764b7c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:12 crc kubenswrapper[4838]: I1128 10:53:12.003255 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-znb8v\" (UniqueName: \"kubernetes.io/projected/afc12091-3d32-4b69-8e6c-29d521764b7c-kube-api-access-znb8v\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:12 crc kubenswrapper[4838]: I1128 10:53:12.003285 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/afc12091-3d32-4b69-8e6c-29d521764b7c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:12 crc kubenswrapper[4838]: I1128 10:53:12.003653 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afc12091-3d32-4b69-8e6c-29d521764b7c-log-httpd\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:12 crc kubenswrapper[4838]: I1128 10:53:12.003788 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/afc12091-3d32-4b69-8e6c-29d521764b7c-run-httpd\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:12 crc kubenswrapper[4838]: I1128 10:53:12.003832 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afc12091-3d32-4b69-8e6c-29d521764b7c-config-data\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:12 crc kubenswrapper[4838]: I1128 10:53:12.004034 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afc12091-3d32-4b69-8e6c-29d521764b7c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:12 crc kubenswrapper[4838]: I1128 10:53:12.008284 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afc12091-3d32-4b69-8e6c-29d521764b7c-scripts\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:12 crc kubenswrapper[4838]: I1128 10:53:12.008696 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/afc12091-3d32-4b69-8e6c-29d521764b7c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:12 crc kubenswrapper[4838]: I1128 10:53:12.008998 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/afc12091-3d32-4b69-8e6c-29d521764b7c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:12 crc kubenswrapper[4838]: I1128 10:53:12.010157 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afc12091-3d32-4b69-8e6c-29d521764b7c-config-data\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:12 crc kubenswrapper[4838]: I1128 10:53:12.010498 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afc12091-3d32-4b69-8e6c-29d521764b7c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:12 crc kubenswrapper[4838]: I1128 10:53:12.024521 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-znb8v\" (UniqueName: \"kubernetes.io/projected/afc12091-3d32-4b69-8e6c-29d521764b7c-kube-api-access-znb8v\") pod \"ceilometer-0\" (UID: \"afc12091-3d32-4b69-8e6c-29d521764b7c\") " pod="openstack/ceilometer-0"
Nov 28 10:53:12 crc kubenswrapper[4838]: I1128 10:53:12.095801 4838 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-565ff4b848-b45gx" podUID="580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.243:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.243:8443: connect: connection refused"
Nov 28 10:53:12 crc kubenswrapper[4838]: I1128 10:53:12.158021 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 10:53:12 crc kubenswrapper[4838]: I1128 10:53:12.573099 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2e0f281-41b3-4f17-a174-41ae4ef2c53e" path="/var/lib/kubelet/pods/c2e0f281-41b3-4f17-a174-41ae4ef2c53e/volumes"
Nov 28 10:53:12 crc kubenswrapper[4838]: I1128 10:53:12.632838 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 10:53:12 crc kubenswrapper[4838]: I1128 10:53:12.645014 4838 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 10:53:12 crc kubenswrapper[4838]: I1128 10:53:12.730296 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afc12091-3d32-4b69-8e6c-29d521764b7c","Type":"ContainerStarted","Data":"425e1bfd78ae49bb65b0d5b897189da6d8786e3acfb443730da14566aa8c8372"}
Nov 28 10:53:14 crc kubenswrapper[4838]: I1128 10:53:14.758749 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afc12091-3d32-4b69-8e6c-29d521764b7c","Type":"ContainerStarted","Data":"39988eb63b540614d973782396838a1eb5ba44a7c736c46364424dd4c6224dac"}
Nov 28 10:53:15 crc kubenswrapper[4838]: I1128 10:53:15.770058 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afc12091-3d32-4b69-8e6c-29d521764b7c","Type":"ContainerStarted","Data":"92236ab8c6dde39b720e7c6cfe1676fb1726148b110e87a51bd01eb8c1df11fd"}
Nov 28 10:53:15 crc kubenswrapper[4838]: I1128 10:53:15.772377 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afc12091-3d32-4b69-8e6c-29d521764b7c","Type":"ContainerStarted","Data":"d9c753b80c41c29f696162150877874210de43c9d2f17ba3be3ac33388c868c9"}
Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.661887 4838 util.go:48] "No ready sandbox for pod 
can be found. Need to start a new one" pod="openstack/horizon-565ff4b848-b45gx" Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.715826 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-86ftg\" (UniqueName: \"kubernetes.io/projected/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-kube-api-access-86ftg\") pod \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.716061 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-horizon-tls-certs\") pod \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.716140 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-logs\") pod \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.716199 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-scripts\") pod \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.716237 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-config-data\") pod \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.716267 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-combined-ca-bundle\") pod \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.716307 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-horizon-secret-key\") pod \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\" (UID: \"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2\") " Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.716624 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-logs" (OuterVolumeSpecName: "logs") pod "580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" (UID: "580cb0e9-6caf-4f93-986e-a0cdd62d8cd2"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.717110 4838 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-logs\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.724780 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" (UID: "580cb0e9-6caf-4f93-986e-a0cdd62d8cd2"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.724829 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-kube-api-access-86ftg" (OuterVolumeSpecName: "kube-api-access-86ftg") pod "580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" (UID: "580cb0e9-6caf-4f93-986e-a0cdd62d8cd2"). InnerVolumeSpecName "kube-api-access-86ftg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.751564 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-config-data" (OuterVolumeSpecName: "config-data") pod "580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" (UID: "580cb0e9-6caf-4f93-986e-a0cdd62d8cd2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.766944 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" (UID: "580cb0e9-6caf-4f93-986e-a0cdd62d8cd2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.782871 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-scripts" (OuterVolumeSpecName: "scripts") pod "580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" (UID: "580cb0e9-6caf-4f93-986e-a0cdd62d8cd2"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.783302 4838 generic.go:334] "Generic (PLEG): container finished" podID="580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" containerID="07b13478bd4bc4909ab58488a40ca59a947862e788cdd515fce2962940d87199" exitCode=137 Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.783330 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-565ff4b848-b45gx" event={"ID":"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2","Type":"ContainerDied","Data":"07b13478bd4bc4909ab58488a40ca59a947862e788cdd515fce2962940d87199"} Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.783354 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-565ff4b848-b45gx" event={"ID":"580cb0e9-6caf-4f93-986e-a0cdd62d8cd2","Type":"ContainerDied","Data":"ab6d910e4d5a60a4f62fe7c8ad4e3a565a1b9b0737bf9137fcab68f1b4456b85"} Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.783369 4838 scope.go:117] "RemoveContainer" containerID="c46d2f5a288bc031deb50b4c4507cac1d9c85391a267a560fda8b5a38f93334f" Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.783402 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-565ff4b848-b45gx" Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.796829 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" (UID: "580cb0e9-6caf-4f93-986e-a0cdd62d8cd2"). InnerVolumeSpecName "horizon-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.819102 4838 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.819142 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-86ftg\" (UniqueName: \"kubernetes.io/projected/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-kube-api-access-86ftg\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.819157 4838 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.819171 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.819183 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:16 crc kubenswrapper[4838]: I1128 10:53:16.819193 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:17 crc kubenswrapper[4838]: I1128 10:53:17.049524 4838 scope.go:117] "RemoveContainer" containerID="07b13478bd4bc4909ab58488a40ca59a947862e788cdd515fce2962940d87199" Nov 28 10:53:17 crc 
kubenswrapper[4838]: I1128 10:53:17.072621 4838 scope.go:117] "RemoveContainer" containerID="c46d2f5a288bc031deb50b4c4507cac1d9c85391a267a560fda8b5a38f93334f" Nov 28 10:53:17 crc kubenswrapper[4838]: E1128 10:53:17.073191 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c46d2f5a288bc031deb50b4c4507cac1d9c85391a267a560fda8b5a38f93334f\": container with ID starting with c46d2f5a288bc031deb50b4c4507cac1d9c85391a267a560fda8b5a38f93334f not found: ID does not exist" containerID="c46d2f5a288bc031deb50b4c4507cac1d9c85391a267a560fda8b5a38f93334f" Nov 28 10:53:17 crc kubenswrapper[4838]: I1128 10:53:17.073245 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c46d2f5a288bc031deb50b4c4507cac1d9c85391a267a560fda8b5a38f93334f"} err="failed to get container status \"c46d2f5a288bc031deb50b4c4507cac1d9c85391a267a560fda8b5a38f93334f\": rpc error: code = NotFound desc = could not find container \"c46d2f5a288bc031deb50b4c4507cac1d9c85391a267a560fda8b5a38f93334f\": container with ID starting with c46d2f5a288bc031deb50b4c4507cac1d9c85391a267a560fda8b5a38f93334f not found: ID does not exist" Nov 28 10:53:17 crc kubenswrapper[4838]: I1128 10:53:17.073277 4838 scope.go:117] "RemoveContainer" containerID="07b13478bd4bc4909ab58488a40ca59a947862e788cdd515fce2962940d87199" Nov 28 10:53:17 crc kubenswrapper[4838]: E1128 10:53:17.073704 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07b13478bd4bc4909ab58488a40ca59a947862e788cdd515fce2962940d87199\": container with ID starting with 07b13478bd4bc4909ab58488a40ca59a947862e788cdd515fce2962940d87199 not found: ID does not exist" containerID="07b13478bd4bc4909ab58488a40ca59a947862e788cdd515fce2962940d87199" Nov 28 10:53:17 crc kubenswrapper[4838]: I1128 10:53:17.073780 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07b13478bd4bc4909ab58488a40ca59a947862e788cdd515fce2962940d87199"} err="failed to get container status \"07b13478bd4bc4909ab58488a40ca59a947862e788cdd515fce2962940d87199\": rpc error: code = NotFound desc = could not find container \"07b13478bd4bc4909ab58488a40ca59a947862e788cdd515fce2962940d87199\": container with ID starting with 07b13478bd4bc4909ab58488a40ca59a947862e788cdd515fce2962940d87199 not found: ID does not exist" Nov 28 10:53:17 crc kubenswrapper[4838]: I1128 10:53:17.269246 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-565ff4b848-b45gx"] Nov 28 10:53:17 crc kubenswrapper[4838]: I1128 10:53:17.277957 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-565ff4b848-b45gx"] Nov 28 10:53:17 crc kubenswrapper[4838]: I1128 10:53:17.799263 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"afc12091-3d32-4b69-8e6c-29d521764b7c","Type":"ContainerStarted","Data":"c5701d49060a65f5fadb9e55eb5c6cd522516352138851d0b09d73423241c725"} Nov 28 10:53:17 crc kubenswrapper[4838]: I1128 10:53:17.801093 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 10:53:17 crc kubenswrapper[4838]: I1128 10:53:17.831622 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.377222532 podStartE2EDuration="6.831597687s" podCreationTimestamp="2025-11-28 10:53:11 +0000 UTC" firstStartedPulling="2025-11-28 
10:53:12.644624073 +0000 UTC m=+3364.343598243" lastFinishedPulling="2025-11-28 10:53:17.098999228 +0000 UTC m=+3368.797973398" observedRunningTime="2025-11-28 10:53:17.822265692 +0000 UTC m=+3369.521239862" watchObservedRunningTime="2025-11-28 10:53:17.831597687 +0000 UTC m=+3369.530571897" Nov 28 10:53:18 crc kubenswrapper[4838]: I1128 10:53:18.579530 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" path="/var/lib/kubelet/pods/580cb0e9-6caf-4f93-986e-a0cdd62d8cd2/volumes" Nov 28 10:53:19 crc kubenswrapper[4838]: I1128 10:53:19.596804 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-scheduler-0" Nov 28 10:53:19 crc kubenswrapper[4838]: I1128 10:53:19.665920 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-scheduler-0"] Nov 28 10:53:19 crc kubenswrapper[4838]: I1128 10:53:19.768580 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-share-share1-0" Nov 28 10:53:19 crc kubenswrapper[4838]: I1128 10:53:19.817316 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-scheduler-0" podUID="ab6b3078-9a91-4592-b0f5-f279b773646a" containerName="manila-scheduler" containerID="cri-o://3a1d7adf84a359bf3b0eff48483654dd2a6526292140a1755c8155d2ed4c7c6a" gracePeriod=30 Nov 28 10:53:19 crc kubenswrapper[4838]: I1128 10:53:19.817430 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-scheduler-0" podUID="ab6b3078-9a91-4592-b0f5-f279b773646a" containerName="probe" containerID="cri-o://02660319b311fd3fd97999dced12d9e4d3638b623efded2148a24a9dbfb649dc" gracePeriod=30 Nov 28 10:53:19 crc kubenswrapper[4838]: I1128 10:53:19.859344 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-share-share1-0"] Nov 28 10:53:19 crc kubenswrapper[4838]: I1128 10:53:19.859840 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-share-share1-0" podUID="d645d530-c8e1-48c5-9c93-2bf0ef691514" containerName="manila-share" containerID="cri-o://1d975085ad4f3ec99a878f8fa1ec1022a1135e14c310b0c83c68d6a9c422fbc8" gracePeriod=30 Nov 28 10:53:19 crc kubenswrapper[4838]: I1128 10:53:19.860078 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-share-share1-0" podUID="d645d530-c8e1-48c5-9c93-2bf0ef691514" containerName="probe" containerID="cri-o://67a7b3d6ebe1d6e5474e23ad22300ec84f8a144b1f94fa71bc684024a71ad351" gracePeriod=30 Nov 28 10:53:20 crc kubenswrapper[4838]: I1128 10:53:20.831763 4838 generic.go:334] "Generic (PLEG): container finished" podID="d645d530-c8e1-48c5-9c93-2bf0ef691514" containerID="67a7b3d6ebe1d6e5474e23ad22300ec84f8a144b1f94fa71bc684024a71ad351" exitCode=0 Nov 28 10:53:20 crc kubenswrapper[4838]: I1128 10:53:20.832148 4838 generic.go:334] "Generic (PLEG): container finished" podID="d645d530-c8e1-48c5-9c93-2bf0ef691514" containerID="1d975085ad4f3ec99a878f8fa1ec1022a1135e14c310b0c83c68d6a9c422fbc8" exitCode=1 Nov 28 10:53:20 crc kubenswrapper[4838]: I1128 10:53:20.832207 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"d645d530-c8e1-48c5-9c93-2bf0ef691514","Type":"ContainerDied","Data":"67a7b3d6ebe1d6e5474e23ad22300ec84f8a144b1f94fa71bc684024a71ad351"} Nov 28 10:53:20 crc kubenswrapper[4838]: I1128 10:53:20.832244 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/manila-share-share1-0" event={"ID":"d645d530-c8e1-48c5-9c93-2bf0ef691514","Type":"ContainerDied","Data":"1d975085ad4f3ec99a878f8fa1ec1022a1135e14c310b0c83c68d6a9c422fbc8"} Nov 28 10:53:20 crc kubenswrapper[4838]: I1128 10:53:20.832261 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"d645d530-c8e1-48c5-9c93-2bf0ef691514","Type":"ContainerDied","Data":"6d4e0f5b068987f1f9927e46485c9e21712f9ac86d5d26022915224512abd53a"} Nov 28 10:53:20 crc kubenswrapper[4838]: I1128 10:53:20.832276 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6d4e0f5b068987f1f9927e46485c9e21712f9ac86d5d26022915224512abd53a" Nov 28 10:53:20 crc kubenswrapper[4838]: I1128 10:53:20.838847 4838 generic.go:334] "Generic (PLEG): container finished" podID="ab6b3078-9a91-4592-b0f5-f279b773646a" containerID="02660319b311fd3fd97999dced12d9e4d3638b623efded2148a24a9dbfb649dc" exitCode=0 Nov 28 10:53:20 crc kubenswrapper[4838]: I1128 10:53:20.838933 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"ab6b3078-9a91-4592-b0f5-f279b773646a","Type":"ContainerDied","Data":"02660319b311fd3fd97999dced12d9e4d3638b623efded2148a24a9dbfb649dc"} Nov 28 10:53:20 crc kubenswrapper[4838]: I1128 10:53:20.890348 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.001852 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-config-data\") pod \"d645d530-c8e1-48c5-9c93-2bf0ef691514\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.001936 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d645d530-c8e1-48c5-9c93-2bf0ef691514-ceph\") pod \"d645d530-c8e1-48c5-9c93-2bf0ef691514\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.002022 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-config-data-custom\") pod \"d645d530-c8e1-48c5-9c93-2bf0ef691514\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.002043 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/d645d530-c8e1-48c5-9c93-2bf0ef691514-var-lib-manila\") pod \"d645d530-c8e1-48c5-9c93-2bf0ef691514\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.002092 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d645d530-c8e1-48c5-9c93-2bf0ef691514-etc-machine-id\") pod \"d645d530-c8e1-48c5-9c93-2bf0ef691514\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.002125 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-scripts\") pod \"d645d530-c8e1-48c5-9c93-2bf0ef691514\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " Nov 28 10:53:21 crc 
kubenswrapper[4838]: I1128 10:53:21.002210 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-thzpj\" (UniqueName: \"kubernetes.io/projected/d645d530-c8e1-48c5-9c93-2bf0ef691514-kube-api-access-thzpj\") pod \"d645d530-c8e1-48c5-9c93-2bf0ef691514\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.002319 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-combined-ca-bundle\") pod \"d645d530-c8e1-48c5-9c93-2bf0ef691514\" (UID: \"d645d530-c8e1-48c5-9c93-2bf0ef691514\") " Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.002681 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d645d530-c8e1-48c5-9c93-2bf0ef691514-var-lib-manila" (OuterVolumeSpecName: "var-lib-manila") pod "d645d530-c8e1-48c5-9c93-2bf0ef691514" (UID: "d645d530-c8e1-48c5-9c93-2bf0ef691514"). InnerVolumeSpecName "var-lib-manila". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.004005 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d645d530-c8e1-48c5-9c93-2bf0ef691514-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "d645d530-c8e1-48c5-9c93-2bf0ef691514" (UID: "d645d530-c8e1-48c5-9c93-2bf0ef691514"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.008483 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d645d530-c8e1-48c5-9c93-2bf0ef691514" (UID: "d645d530-c8e1-48c5-9c93-2bf0ef691514"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.010664 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d645d530-c8e1-48c5-9c93-2bf0ef691514-kube-api-access-thzpj" (OuterVolumeSpecName: "kube-api-access-thzpj") pod "d645d530-c8e1-48c5-9c93-2bf0ef691514" (UID: "d645d530-c8e1-48c5-9c93-2bf0ef691514"). InnerVolumeSpecName "kube-api-access-thzpj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.012286 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d645d530-c8e1-48c5-9c93-2bf0ef691514-ceph" (OuterVolumeSpecName: "ceph") pod "d645d530-c8e1-48c5-9c93-2bf0ef691514" (UID: "d645d530-c8e1-48c5-9c93-2bf0ef691514"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.012467 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-scripts" (OuterVolumeSpecName: "scripts") pod "d645d530-c8e1-48c5-9c93-2bf0ef691514" (UID: "d645d530-c8e1-48c5-9c93-2bf0ef691514"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.074928 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d645d530-c8e1-48c5-9c93-2bf0ef691514" (UID: "d645d530-c8e1-48c5-9c93-2bf0ef691514"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.104212 4838 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d645d530-c8e1-48c5-9c93-2bf0ef691514-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.104246 4838 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.104261 4838 reconciler_common.go:293] "Volume detached for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/d645d530-c8e1-48c5-9c93-2bf0ef691514-var-lib-manila\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.104272 4838 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d645d530-c8e1-48c5-9c93-2bf0ef691514-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.104284 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.104296 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-thzpj\" (UniqueName: \"kubernetes.io/projected/d645d530-c8e1-48c5-9c93-2bf0ef691514-kube-api-access-thzpj\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.104308 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.104402 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-config-data" (OuterVolumeSpecName: "config-data") pod "d645d530-c8e1-48c5-9c93-2bf0ef691514" (UID: "d645d530-c8e1-48c5-9c93-2bf0ef691514"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.206381 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d645d530-c8e1-48c5-9c93-2bf0ef691514-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.849480 4838 generic.go:334] "Generic (PLEG): container finished" podID="ab6b3078-9a91-4592-b0f5-f279b773646a" containerID="3a1d7adf84a359bf3b0eff48483654dd2a6526292140a1755c8155d2ed4c7c6a" exitCode=0 Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.849545 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"ab6b3078-9a91-4592-b0f5-f279b773646a","Type":"ContainerDied","Data":"3a1d7adf84a359bf3b0eff48483654dd2a6526292140a1755c8155d2ed4c7c6a"} Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.849580 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.930579 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-share-share1-0"] Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.947658 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-share-share1-0"] Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.965662 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-share-share1-0"] Nov 28 10:53:21 crc kubenswrapper[4838]: E1128 10:53:21.966297 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" containerName="horizon-log" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.966323 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" containerName="horizon-log" Nov 28 10:53:21 crc kubenswrapper[4838]: E1128 10:53:21.966352 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" containerName="horizon" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.966361 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" containerName="horizon" Nov 28 10:53:21 crc kubenswrapper[4838]: E1128 10:53:21.966388 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d645d530-c8e1-48c5-9c93-2bf0ef691514" containerName="manila-share" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.966397 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="d645d530-c8e1-48c5-9c93-2bf0ef691514" containerName="manila-share" Nov 28 10:53:21 crc kubenswrapper[4838]: E1128 10:53:21.966425 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d645d530-c8e1-48c5-9c93-2bf0ef691514" containerName="probe" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.966433 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="d645d530-c8e1-48c5-9c93-2bf0ef691514" containerName="probe" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.966673 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" containerName="horizon-log" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.966700 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="580cb0e9-6caf-4f93-986e-a0cdd62d8cd2" containerName="horizon" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.966732 4838 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="d645d530-c8e1-48c5-9c93-2bf0ef691514" containerName="probe" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.966745 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="d645d530-c8e1-48c5-9c93-2bf0ef691514" containerName="manila-share" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.967930 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.980447 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-share-share1-config-data" Nov 28 10:53:21 crc kubenswrapper[4838]: I1128 10:53:21.984357 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.052860 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcbt8\" (UniqueName: \"kubernetes.io/projected/4697ec9a-896b-4703-87c0-84a7741b8724-kube-api-access-jcbt8\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.052924 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4697ec9a-896b-4703-87c0-84a7741b8724-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.052955 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/4697ec9a-896b-4703-87c0-84a7741b8724-ceph\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.053082 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4697ec9a-896b-4703-87c0-84a7741b8724-scripts\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.053128 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4697ec9a-896b-4703-87c0-84a7741b8724-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.053161 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4697ec9a-896b-4703-87c0-84a7741b8724-config-data\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.053193 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/4697ec9a-896b-4703-87c0-84a7741b8724-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 
10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.053225 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4697ec9a-896b-4703-87c0-84a7741b8724-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.155098 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcbt8\" (UniqueName: \"kubernetes.io/projected/4697ec9a-896b-4703-87c0-84a7741b8724-kube-api-access-jcbt8\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.155168 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4697ec9a-896b-4703-87c0-84a7741b8724-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.155197 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/4697ec9a-896b-4703-87c0-84a7741b8724-ceph\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.155311 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4697ec9a-896b-4703-87c0-84a7741b8724-scripts\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.155367 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4697ec9a-896b-4703-87c0-84a7741b8724-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.155411 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4697ec9a-896b-4703-87c0-84a7741b8724-config-data\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.155442 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/4697ec9a-896b-4703-87c0-84a7741b8724-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.155477 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4697ec9a-896b-4703-87c0-84a7741b8724-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.156804 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/4697ec9a-896b-4703-87c0-84a7741b8724-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.156863 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/4697ec9a-896b-4703-87c0-84a7741b8724-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.160762 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4697ec9a-896b-4703-87c0-84a7741b8724-scripts\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.160900 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4697ec9a-896b-4703-87c0-84a7741b8724-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.161695 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4697ec9a-896b-4703-87c0-84a7741b8724-config-data\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.166081 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4697ec9a-896b-4703-87c0-84a7741b8724-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.174418 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/4697ec9a-896b-4703-87c0-84a7741b8724-ceph\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.177641 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcbt8\" (UniqueName: \"kubernetes.io/projected/4697ec9a-896b-4703-87c0-84a7741b8724-kube-api-access-jcbt8\") pod \"manila-share-share1-0\" (UID: \"4697ec9a-896b-4703-87c0-84a7741b8724\") " pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.269652 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.302862 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-share-share1-0" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.361351 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ab6b3078-9a91-4592-b0f5-f279b773646a-etc-machine-id\") pod \"ab6b3078-9a91-4592-b0f5-f279b773646a\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.361476 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-scripts\") pod \"ab6b3078-9a91-4592-b0f5-f279b773646a\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.361516 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-config-data-custom\") pod \"ab6b3078-9a91-4592-b0f5-f279b773646a\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.361617 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-config-data\") pod \"ab6b3078-9a91-4592-b0f5-f279b773646a\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.361675 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tckk2\" (UniqueName: \"kubernetes.io/projected/ab6b3078-9a91-4592-b0f5-f279b773646a-kube-api-access-tckk2\") pod \"ab6b3078-9a91-4592-b0f5-f279b773646a\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.361694 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-combined-ca-bundle\") pod \"ab6b3078-9a91-4592-b0f5-f279b773646a\" (UID: \"ab6b3078-9a91-4592-b0f5-f279b773646a\") " Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.362585 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ab6b3078-9a91-4592-b0f5-f279b773646a-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "ab6b3078-9a91-4592-b0f5-f279b773646a" (UID: "ab6b3078-9a91-4592-b0f5-f279b773646a"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.366899 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-scripts" (OuterVolumeSpecName: "scripts") pod "ab6b3078-9a91-4592-b0f5-f279b773646a" (UID: "ab6b3078-9a91-4592-b0f5-f279b773646a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.366993 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab6b3078-9a91-4592-b0f5-f279b773646a-kube-api-access-tckk2" (OuterVolumeSpecName: "kube-api-access-tckk2") pod "ab6b3078-9a91-4592-b0f5-f279b773646a" (UID: "ab6b3078-9a91-4592-b0f5-f279b773646a"). InnerVolumeSpecName "kube-api-access-tckk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.369859 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ab6b3078-9a91-4592-b0f5-f279b773646a" (UID: "ab6b3078-9a91-4592-b0f5-f279b773646a"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.423070 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ab6b3078-9a91-4592-b0f5-f279b773646a" (UID: "ab6b3078-9a91-4592-b0f5-f279b773646a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.469956 4838 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.470014 4838 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.470029 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tckk2\" (UniqueName: \"kubernetes.io/projected/ab6b3078-9a91-4592-b0f5-f279b773646a-kube-api-access-tckk2\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.470061 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.470072 4838 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ab6b3078-9a91-4592-b0f5-f279b773646a-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.519151 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-config-data" (OuterVolumeSpecName: "config-data") pod "ab6b3078-9a91-4592-b0f5-f279b773646a" (UID: "ab6b3078-9a91-4592-b0f5-f279b773646a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.571733 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab6b3078-9a91-4592-b0f5-f279b773646a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 10:53:22 crc kubenswrapper[4838]: I1128 10:53:22.572895 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d645d530-c8e1-48c5-9c93-2bf0ef691514" path="/var/lib/kubelet/pods/d645d530-c8e1-48c5-9c93-2bf0ef691514/volumes" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:22.858121 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"ab6b3078-9a91-4592-b0f5-f279b773646a","Type":"ContainerDied","Data":"1e1d00299522f53136b6ea5f9f4161406d07342f0f96e5c3c2177a49f2871d30"} Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:22.858314 4838 scope.go:117] "RemoveContainer" containerID="02660319b311fd3fd97999dced12d9e4d3638b623efded2148a24a9dbfb649dc" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:22.858424 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:22.859071 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:22.889366 4838 scope.go:117] "RemoveContainer" containerID="3a1d7adf84a359bf3b0eff48483654dd2a6526292140a1755c8155d2ed4c7c6a" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:22.907217 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-scheduler-0"] Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:22.992821 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-scheduler-0"] Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.007808 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"] Nov 28 10:53:23 crc kubenswrapper[4838]: E1128 10:53:23.008459 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab6b3078-9a91-4592-b0f5-f279b773646a" containerName="manila-scheduler" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.008473 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab6b3078-9a91-4592-b0f5-f279b773646a" containerName="manila-scheduler" Nov 28 10:53:23 crc kubenswrapper[4838]: E1128 10:53:23.008525 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab6b3078-9a91-4592-b0f5-f279b773646a" containerName="probe" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.008531 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab6b3078-9a91-4592-b0f5-f279b773646a" containerName="probe" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.008907 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab6b3078-9a91-4592-b0f5-f279b773646a" containerName="manila-scheduler" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.008940 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab6b3078-9a91-4592-b0f5-f279b773646a" containerName="probe" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.011633 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-scheduler-0" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.019203 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.022317 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.083054 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b49fbfc-a8f5-48aa-bb7b-96d82967eecb-scripts\") pod \"manila-scheduler-0\" (UID: \"9b49fbfc-a8f5-48aa-bb7b-96d82967eecb\") " pod="openstack/manila-scheduler-0" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.083164 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9b49fbfc-a8f5-48aa-bb7b-96d82967eecb-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"9b49fbfc-a8f5-48aa-bb7b-96d82967eecb\") " pod="openstack/manila-scheduler-0" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.083190 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b49fbfc-a8f5-48aa-bb7b-96d82967eecb-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"9b49fbfc-a8f5-48aa-bb7b-96d82967eecb\") " pod="openstack/manila-scheduler-0" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.083310 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b49fbfc-a8f5-48aa-bb7b-96d82967eecb-config-data\") pod \"manila-scheduler-0\" (UID: \"9b49fbfc-a8f5-48aa-bb7b-96d82967eecb\") " pod="openstack/manila-scheduler-0" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.083364 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9b49fbfc-a8f5-48aa-bb7b-96d82967eecb-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"9b49fbfc-a8f5-48aa-bb7b-96d82967eecb\") " pod="openstack/manila-scheduler-0" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.083394 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sxmr7\" (UniqueName: \"kubernetes.io/projected/9b49fbfc-a8f5-48aa-bb7b-96d82967eecb-kube-api-access-sxmr7\") pod \"manila-scheduler-0\" (UID: \"9b49fbfc-a8f5-48aa-bb7b-96d82967eecb\") " pod="openstack/manila-scheduler-0" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.184445 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b49fbfc-a8f5-48aa-bb7b-96d82967eecb-config-data\") pod \"manila-scheduler-0\" (UID: \"9b49fbfc-a8f5-48aa-bb7b-96d82967eecb\") " pod="openstack/manila-scheduler-0" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.184528 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9b49fbfc-a8f5-48aa-bb7b-96d82967eecb-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"9b49fbfc-a8f5-48aa-bb7b-96d82967eecb\") " pod="openstack/manila-scheduler-0" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.184564 4838 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-sxmr7\" (UniqueName: \"kubernetes.io/projected/9b49fbfc-a8f5-48aa-bb7b-96d82967eecb-kube-api-access-sxmr7\") pod \"manila-scheduler-0\" (UID: \"9b49fbfc-a8f5-48aa-bb7b-96d82967eecb\") " pod="openstack/manila-scheduler-0" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.184610 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b49fbfc-a8f5-48aa-bb7b-96d82967eecb-scripts\") pod \"manila-scheduler-0\" (UID: \"9b49fbfc-a8f5-48aa-bb7b-96d82967eecb\") " pod="openstack/manila-scheduler-0" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.184644 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9b49fbfc-a8f5-48aa-bb7b-96d82967eecb-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"9b49fbfc-a8f5-48aa-bb7b-96d82967eecb\") " pod="openstack/manila-scheduler-0" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.184670 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9b49fbfc-a8f5-48aa-bb7b-96d82967eecb-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"9b49fbfc-a8f5-48aa-bb7b-96d82967eecb\") " pod="openstack/manila-scheduler-0" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.184686 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b49fbfc-a8f5-48aa-bb7b-96d82967eecb-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"9b49fbfc-a8f5-48aa-bb7b-96d82967eecb\") " pod="openstack/manila-scheduler-0" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.187811 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b49fbfc-a8f5-48aa-bb7b-96d82967eecb-scripts\") pod \"manila-scheduler-0\" (UID: \"9b49fbfc-a8f5-48aa-bb7b-96d82967eecb\") " pod="openstack/manila-scheduler-0" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.189841 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b49fbfc-a8f5-48aa-bb7b-96d82967eecb-config-data\") pod \"manila-scheduler-0\" (UID: \"9b49fbfc-a8f5-48aa-bb7b-96d82967eecb\") " pod="openstack/manila-scheduler-0" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.190286 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9b49fbfc-a8f5-48aa-bb7b-96d82967eecb-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"9b49fbfc-a8f5-48aa-bb7b-96d82967eecb\") " pod="openstack/manila-scheduler-0" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.190700 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b49fbfc-a8f5-48aa-bb7b-96d82967eecb-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"9b49fbfc-a8f5-48aa-bb7b-96d82967eecb\") " pod="openstack/manila-scheduler-0" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.210127 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sxmr7\" (UniqueName: \"kubernetes.io/projected/9b49fbfc-a8f5-48aa-bb7b-96d82967eecb-kube-api-access-sxmr7\") pod \"manila-scheduler-0\" (UID: \"9b49fbfc-a8f5-48aa-bb7b-96d82967eecb\") " pod="openstack/manila-scheduler-0" 
Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.337286 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.562194 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:53:23 crc kubenswrapper[4838]: E1128 10:53:23.562613 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.888878 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"4697ec9a-896b-4703-87c0-84a7741b8724","Type":"ContainerStarted","Data":"ec3605b4fb72da69f618ca40617721d8098f70dc82fa26e52c7b52e738494807"} Nov 28 10:53:23 crc kubenswrapper[4838]: I1128 10:53:23.889256 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"4697ec9a-896b-4703-87c0-84a7741b8724","Type":"ContainerStarted","Data":"88107e8e3728fee47d37005e5c3b0f7e6c30ef4971505f681328988ece16f28b"} Nov 28 10:53:24 crc kubenswrapper[4838]: I1128 10:53:24.266540 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 28 10:53:24 crc kubenswrapper[4838]: W1128 10:53:24.267792 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9b49fbfc_a8f5_48aa_bb7b_96d82967eecb.slice/crio-719cc4c0957da1040b0bc0e86788467f8f04e4f3c77d89a74eb481e3a1c8daa7 WatchSource:0}: Error finding container 719cc4c0957da1040b0bc0e86788467f8f04e4f3c77d89a74eb481e3a1c8daa7: Status 404 returned error can't find the container with id 719cc4c0957da1040b0bc0e86788467f8f04e4f3c77d89a74eb481e3a1c8daa7 Nov 28 10:53:24 crc kubenswrapper[4838]: I1128 10:53:24.540057 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/manila-api-0" Nov 28 10:53:24 crc kubenswrapper[4838]: I1128 10:53:24.590188 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab6b3078-9a91-4592-b0f5-f279b773646a" path="/var/lib/kubelet/pods/ab6b3078-9a91-4592-b0f5-f279b773646a/volumes" Nov 28 10:53:24 crc kubenswrapper[4838]: I1128 10:53:24.911690 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"9b49fbfc-a8f5-48aa-bb7b-96d82967eecb","Type":"ContainerStarted","Data":"767af8831331eaf8da389f097547e5075e4708e6058a666ca0192631c8a523e5"} Nov 28 10:53:24 crc kubenswrapper[4838]: I1128 10:53:24.912096 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"9b49fbfc-a8f5-48aa-bb7b-96d82967eecb","Type":"ContainerStarted","Data":"719cc4c0957da1040b0bc0e86788467f8f04e4f3c77d89a74eb481e3a1c8daa7"} Nov 28 10:53:24 crc kubenswrapper[4838]: I1128 10:53:24.916493 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"4697ec9a-896b-4703-87c0-84a7741b8724","Type":"ContainerStarted","Data":"4f8107ec9f3118b2bf07d00b30c2a625df833556c0065af38c9e3cb288d249e0"} Nov 28 10:53:24 crc kubenswrapper[4838]: I1128 10:53:24.943916 
4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-share-share1-0" podStartSLOduration=3.943893741 podStartE2EDuration="3.943893741s" podCreationTimestamp="2025-11-28 10:53:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:53:24.938579566 +0000 UTC m=+3376.637553736" watchObservedRunningTime="2025-11-28 10:53:24.943893741 +0000 UTC m=+3376.642867911" Nov 28 10:53:25 crc kubenswrapper[4838]: I1128 10:53:25.928270 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"9b49fbfc-a8f5-48aa-bb7b-96d82967eecb","Type":"ContainerStarted","Data":"90bb8818af852bd898d865ac612fd8ef6b8ecd5dc386c72421e89d7ae371acc1"} Nov 28 10:53:25 crc kubenswrapper[4838]: I1128 10:53:25.955660 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=3.955631674 podStartE2EDuration="3.955631674s" podCreationTimestamp="2025-11-28 10:53:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 10:53:25.951683476 +0000 UTC m=+3377.650657686" watchObservedRunningTime="2025-11-28 10:53:25.955631674 +0000 UTC m=+3377.654605874" Nov 28 10:53:32 crc kubenswrapper[4838]: I1128 10:53:32.303815 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 28 10:53:33 crc kubenswrapper[4838]: I1128 10:53:33.337635 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0" Nov 28 10:53:38 crc kubenswrapper[4838]: I1128 10:53:38.567786 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:53:38 crc kubenswrapper[4838]: E1128 10:53:38.568670 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:53:42 crc kubenswrapper[4838]: I1128 10:53:42.184747 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 10:53:43 crc kubenswrapper[4838]: I1128 10:53:43.864031 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-share-share1-0" Nov 28 10:53:44 crc kubenswrapper[4838]: I1128 10:53:44.791275 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-scheduler-0" Nov 28 10:53:49 crc kubenswrapper[4838]: I1128 10:53:49.563908 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:53:49 crc kubenswrapper[4838]: E1128 10:53:49.565172 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" 
podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:54:04 crc kubenswrapper[4838]: I1128 10:54:04.568161 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:54:04 crc kubenswrapper[4838]: E1128 10:54:04.569445 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:54:19 crc kubenswrapper[4838]: I1128 10:54:19.563557 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:54:19 crc kubenswrapper[4838]: E1128 10:54:19.564873 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:54:34 crc kubenswrapper[4838]: I1128 10:54:34.562588 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:54:34 crc kubenswrapper[4838]: E1128 10:54:34.563806 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:54:34 crc kubenswrapper[4838]: I1128 10:54:34.805901 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Nov 28 10:54:34 crc kubenswrapper[4838]: I1128 10:54:34.807127 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 28 10:54:34 crc kubenswrapper[4838]: I1128 10:54:34.811628 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 28 10:54:34 crc kubenswrapper[4838]: I1128 10:54:34.812147 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Nov 28 10:54:34 crc kubenswrapper[4838]: I1128 10:54:34.812954 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Nov 28 10:54:34 crc kubenswrapper[4838]: I1128 10:54:34.817226 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-m9l7v" Nov 28 10:54:34 crc kubenswrapper[4838]: I1128 10:54:34.825934 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 28 10:54:34 crc kubenswrapper[4838]: I1128 10:54:34.910329 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:34 crc kubenswrapper[4838]: I1128 10:54:34.910542 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7899bfa9-2025-457b-9c46-194188b7f52e-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:34 crc kubenswrapper[4838]: I1128 10:54:34.910600 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7899bfa9-2025-457b-9c46-194188b7f52e-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:34 crc kubenswrapper[4838]: I1128 10:54:34.910638 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/7899bfa9-2025-457b-9c46-194188b7f52e-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:34 crc kubenswrapper[4838]: I1128 10:54:34.910681 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/7899bfa9-2025-457b-9c46-194188b7f52e-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:34 crc kubenswrapper[4838]: I1128 10:54:34.911011 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7899bfa9-2025-457b-9c46-194188b7f52e-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:34 crc kubenswrapper[4838]: I1128 10:54:34.911188 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/7899bfa9-2025-457b-9c46-194188b7f52e-config-data\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:34 crc kubenswrapper[4838]: I1128 10:54:34.911306 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7x6r6\" (UniqueName: \"kubernetes.io/projected/7899bfa9-2025-457b-9c46-194188b7f52e-kube-api-access-7x6r6\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:34 crc kubenswrapper[4838]: I1128 10:54:34.911480 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/7899bfa9-2025-457b-9c46-194188b7f52e-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:35 crc kubenswrapper[4838]: I1128 10:54:35.014659 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7899bfa9-2025-457b-9c46-194188b7f52e-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:35 crc kubenswrapper[4838]: I1128 10:54:35.014787 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7899bfa9-2025-457b-9c46-194188b7f52e-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:35 crc kubenswrapper[4838]: I1128 10:54:35.014841 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/7899bfa9-2025-457b-9c46-194188b7f52e-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:35 crc kubenswrapper[4838]: I1128 10:54:35.014903 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/7899bfa9-2025-457b-9c46-194188b7f52e-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:35 crc kubenswrapper[4838]: I1128 10:54:35.014964 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7899bfa9-2025-457b-9c46-194188b7f52e-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:35 crc kubenswrapper[4838]: I1128 10:54:35.015026 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7899bfa9-2025-457b-9c46-194188b7f52e-config-data\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:35 crc kubenswrapper[4838]: I1128 10:54:35.015095 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7x6r6\" (UniqueName: 
\"kubernetes.io/projected/7899bfa9-2025-457b-9c46-194188b7f52e-kube-api-access-7x6r6\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:35 crc kubenswrapper[4838]: I1128 10:54:35.015147 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/7899bfa9-2025-457b-9c46-194188b7f52e-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:35 crc kubenswrapper[4838]: I1128 10:54:35.015223 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:35 crc kubenswrapper[4838]: I1128 10:54:35.015761 4838 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/tempest-tests-tempest" Nov 28 10:54:35 crc kubenswrapper[4838]: I1128 10:54:35.015794 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/7899bfa9-2025-457b-9c46-194188b7f52e-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:35 crc kubenswrapper[4838]: I1128 10:54:35.016658 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7899bfa9-2025-457b-9c46-194188b7f52e-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:35 crc kubenswrapper[4838]: I1128 10:54:35.017832 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7899bfa9-2025-457b-9c46-194188b7f52e-config-data\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:35 crc kubenswrapper[4838]: I1128 10:54:35.018072 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/7899bfa9-2025-457b-9c46-194188b7f52e-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:35 crc kubenswrapper[4838]: I1128 10:54:35.028702 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/7899bfa9-2025-457b-9c46-194188b7f52e-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:35 crc kubenswrapper[4838]: I1128 10:54:35.029174 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7899bfa9-2025-457b-9c46-194188b7f52e-ssh-key\") pod \"tempest-tests-tempest\" (UID: 
\"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:35 crc kubenswrapper[4838]: I1128 10:54:35.029538 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7899bfa9-2025-457b-9c46-194188b7f52e-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:35 crc kubenswrapper[4838]: I1128 10:54:35.037149 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7x6r6\" (UniqueName: \"kubernetes.io/projected/7899bfa9-2025-457b-9c46-194188b7f52e-kube-api-access-7x6r6\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:35 crc kubenswrapper[4838]: I1128 10:54:35.060084 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"tempest-tests-tempest\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") " pod="openstack/tempest-tests-tempest" Nov 28 10:54:35 crc kubenswrapper[4838]: I1128 10:54:35.134809 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 28 10:54:35 crc kubenswrapper[4838]: I1128 10:54:35.646045 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 28 10:54:35 crc kubenswrapper[4838]: W1128 10:54:35.649159 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7899bfa9_2025_457b_9c46_194188b7f52e.slice/crio-334abfa476ed6e421b059067199813c4f98c204b04a440eb1bd18af94eb4b8b1 WatchSource:0}: Error finding container 334abfa476ed6e421b059067199813c4f98c204b04a440eb1bd18af94eb4b8b1: Status 404 returned error can't find the container with id 334abfa476ed6e421b059067199813c4f98c204b04a440eb1bd18af94eb4b8b1 Nov 28 10:54:35 crc kubenswrapper[4838]: I1128 10:54:35.691004 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"7899bfa9-2025-457b-9c46-194188b7f52e","Type":"ContainerStarted","Data":"334abfa476ed6e421b059067199813c4f98c204b04a440eb1bd18af94eb4b8b1"} Nov 28 10:54:47 crc kubenswrapper[4838]: I1128 10:54:47.562701 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:54:47 crc kubenswrapper[4838]: E1128 10:54:47.563951 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 10:55:00 crc kubenswrapper[4838]: I1128 10:55:00.562058 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:55:08 crc kubenswrapper[4838]: E1128 10:55:08.396688 4838 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified" Nov 28 10:55:08 crc kubenswrapper[4838]: E1128 
10:55:08.397679 4838 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7x6r6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(7899bfa9-2025-457b-9c46-194188b7f52e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 10:55:08 crc kubenswrapper[4838]: E1128 10:55:08.399812 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack/tempest-tests-tempest" podUID="7899bfa9-2025-457b-9c46-194188b7f52e" Nov 28 10:55:09 crc kubenswrapper[4838]: I1128 10:55:09.052999 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerStarted","Data":"336a1410d5b7b6661dd9229c5864e9b8a009c3cf2d23e82987c1cf12f8cec0b5"} Nov 28 10:55:09 crc kubenswrapper[4838]: E1128 10:55:09.055778 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="7899bfa9-2025-457b-9c46-194188b7f52e" Nov 28 10:55:21 crc kubenswrapper[4838]: I1128 10:55:21.306157 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 28 10:55:23 crc kubenswrapper[4838]: I1128 10:55:23.275568 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"7899bfa9-2025-457b-9c46-194188b7f52e","Type":"ContainerStarted","Data":"905d3dc0edf57d99d810fe900ffea0cfdb08431f7351b793cd5f48f1f515c4ca"} Nov 28 10:55:23 crc kubenswrapper[4838]: I1128 10:55:23.313024 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=4.6622702799999995 podStartE2EDuration="50.313000739s" podCreationTimestamp="2025-11-28 10:54:33 +0000 UTC" firstStartedPulling="2025-11-28 10:54:35.652063174 +0000 UTC m=+3447.351037384" lastFinishedPulling="2025-11-28 10:55:21.302793643 +0000 UTC m=+3493.001767843" observedRunningTime="2025-11-28 10:55:23.306636477 +0000 UTC m=+3495.005610687" watchObservedRunningTime="2025-11-28 10:55:23.313000739 +0000 UTC m=+3495.011974929" Nov 28 10:55:44 crc kubenswrapper[4838]: I1128 10:55:44.584996 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-b7cq8"] Nov 28 10:55:44 crc kubenswrapper[4838]: I1128 10:55:44.587922 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b7cq8" Nov 28 10:55:44 crc kubenswrapper[4838]: I1128 10:55:44.600869 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b7cq8"] Nov 28 10:55:44 crc kubenswrapper[4838]: I1128 10:55:44.697323 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97de5484-cf46-42f5-9b58-2cac1c0f2fd1-utilities\") pod \"redhat-marketplace-b7cq8\" (UID: \"97de5484-cf46-42f5-9b58-2cac1c0f2fd1\") " pod="openshift-marketplace/redhat-marketplace-b7cq8" Nov 28 10:55:44 crc kubenswrapper[4838]: I1128 10:55:44.697508 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97de5484-cf46-42f5-9b58-2cac1c0f2fd1-catalog-content\") pod \"redhat-marketplace-b7cq8\" (UID: \"97de5484-cf46-42f5-9b58-2cac1c0f2fd1\") " pod="openshift-marketplace/redhat-marketplace-b7cq8" Nov 28 10:55:44 crc kubenswrapper[4838]: I1128 10:55:44.697647 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbkhs\" (UniqueName: \"kubernetes.io/projected/97de5484-cf46-42f5-9b58-2cac1c0f2fd1-kube-api-access-qbkhs\") pod \"redhat-marketplace-b7cq8\" (UID: \"97de5484-cf46-42f5-9b58-2cac1c0f2fd1\") " pod="openshift-marketplace/redhat-marketplace-b7cq8" Nov 28 10:55:44 crc kubenswrapper[4838]: I1128 10:55:44.800193 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbkhs\" (UniqueName: \"kubernetes.io/projected/97de5484-cf46-42f5-9b58-2cac1c0f2fd1-kube-api-access-qbkhs\") pod \"redhat-marketplace-b7cq8\" (UID: \"97de5484-cf46-42f5-9b58-2cac1c0f2fd1\") " pod="openshift-marketplace/redhat-marketplace-b7cq8" Nov 28 10:55:44 crc kubenswrapper[4838]: I1128 10:55:44.800413 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97de5484-cf46-42f5-9b58-2cac1c0f2fd1-utilities\") pod \"redhat-marketplace-b7cq8\" (UID: \"97de5484-cf46-42f5-9b58-2cac1c0f2fd1\") " pod="openshift-marketplace/redhat-marketplace-b7cq8" Nov 28 10:55:44 crc kubenswrapper[4838]: I1128 10:55:44.800600 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97de5484-cf46-42f5-9b58-2cac1c0f2fd1-catalog-content\") pod \"redhat-marketplace-b7cq8\" (UID: \"97de5484-cf46-42f5-9b58-2cac1c0f2fd1\") " pod="openshift-marketplace/redhat-marketplace-b7cq8" Nov 28 10:55:44 crc kubenswrapper[4838]: I1128 10:55:44.800861 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97de5484-cf46-42f5-9b58-2cac1c0f2fd1-utilities\") pod \"redhat-marketplace-b7cq8\" (UID: \"97de5484-cf46-42f5-9b58-2cac1c0f2fd1\") " pod="openshift-marketplace/redhat-marketplace-b7cq8" Nov 28 10:55:44 crc kubenswrapper[4838]: I1128 10:55:44.801470 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97de5484-cf46-42f5-9b58-2cac1c0f2fd1-catalog-content\") pod \"redhat-marketplace-b7cq8\" (UID: \"97de5484-cf46-42f5-9b58-2cac1c0f2fd1\") " pod="openshift-marketplace/redhat-marketplace-b7cq8" Nov 28 10:55:44 crc kubenswrapper[4838]: I1128 10:55:44.819412 4838 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-qbkhs\" (UniqueName: \"kubernetes.io/projected/97de5484-cf46-42f5-9b58-2cac1c0f2fd1-kube-api-access-qbkhs\") pod \"redhat-marketplace-b7cq8\" (UID: \"97de5484-cf46-42f5-9b58-2cac1c0f2fd1\") " pod="openshift-marketplace/redhat-marketplace-b7cq8" Nov 28 10:55:44 crc kubenswrapper[4838]: I1128 10:55:44.928027 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b7cq8" Nov 28 10:55:45 crc kubenswrapper[4838]: I1128 10:55:45.414538 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b7cq8"] Nov 28 10:55:45 crc kubenswrapper[4838]: I1128 10:55:45.541857 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7cq8" event={"ID":"97de5484-cf46-42f5-9b58-2cac1c0f2fd1","Type":"ContainerStarted","Data":"8aa46d2b7d16bba0a3efac54b21b85a953eb0ca8f172b7c1a17eec3677986968"} Nov 28 10:55:46 crc kubenswrapper[4838]: I1128 10:55:46.556655 4838 generic.go:334] "Generic (PLEG): container finished" podID="97de5484-cf46-42f5-9b58-2cac1c0f2fd1" containerID="9e279a8cd6cd7d9b533560a6b97c251a1faf00058c7a7bdb132eeabc4fefa784" exitCode=0 Nov 28 10:55:46 crc kubenswrapper[4838]: I1128 10:55:46.556780 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7cq8" event={"ID":"97de5484-cf46-42f5-9b58-2cac1c0f2fd1","Type":"ContainerDied","Data":"9e279a8cd6cd7d9b533560a6b97c251a1faf00058c7a7bdb132eeabc4fefa784"} Nov 28 10:55:48 crc kubenswrapper[4838]: I1128 10:55:48.594816 4838 generic.go:334] "Generic (PLEG): container finished" podID="97de5484-cf46-42f5-9b58-2cac1c0f2fd1" containerID="91c478b2f7aefc9dcd61cdc7e901dede516afe6de49a44d971e70a42b9d82ea9" exitCode=0 Nov 28 10:55:48 crc kubenswrapper[4838]: I1128 10:55:48.595641 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7cq8" event={"ID":"97de5484-cf46-42f5-9b58-2cac1c0f2fd1","Type":"ContainerDied","Data":"91c478b2f7aefc9dcd61cdc7e901dede516afe6de49a44d971e70a42b9d82ea9"} Nov 28 10:55:49 crc kubenswrapper[4838]: I1128 10:55:49.605967 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7cq8" event={"ID":"97de5484-cf46-42f5-9b58-2cac1c0f2fd1","Type":"ContainerStarted","Data":"3c65baec0083bcbd5d6c0d53f8d9ead9e151d49774d526afc10e2e3724d6e5cf"} Nov 28 10:55:49 crc kubenswrapper[4838]: I1128 10:55:49.632143 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-b7cq8" podStartSLOduration=3.123775216 podStartE2EDuration="5.632120147s" podCreationTimestamp="2025-11-28 10:55:44 +0000 UTC" firstStartedPulling="2025-11-28 10:55:46.559637054 +0000 UTC m=+3518.258611234" lastFinishedPulling="2025-11-28 10:55:49.067981985 +0000 UTC m=+3520.766956165" observedRunningTime="2025-11-28 10:55:49.627850791 +0000 UTC m=+3521.326824981" watchObservedRunningTime="2025-11-28 10:55:49.632120147 +0000 UTC m=+3521.331094317" Nov 28 10:55:54 crc kubenswrapper[4838]: I1128 10:55:54.928292 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-b7cq8" Nov 28 10:55:54 crc kubenswrapper[4838]: I1128 10:55:54.928870 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-b7cq8" Nov 28 10:55:54 crc kubenswrapper[4838]: I1128 10:55:54.980977 4838 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-b7cq8" Nov 28 10:55:55 crc kubenswrapper[4838]: I1128 10:55:55.718310 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-b7cq8" Nov 28 10:55:55 crc kubenswrapper[4838]: I1128 10:55:55.774337 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b7cq8"] Nov 28 10:55:57 crc kubenswrapper[4838]: I1128 10:55:57.688401 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-b7cq8" podUID="97de5484-cf46-42f5-9b58-2cac1c0f2fd1" containerName="registry-server" containerID="cri-o://3c65baec0083bcbd5d6c0d53f8d9ead9e151d49774d526afc10e2e3724d6e5cf" gracePeriod=2 Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.189757 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b7cq8" Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.343055 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qbkhs\" (UniqueName: \"kubernetes.io/projected/97de5484-cf46-42f5-9b58-2cac1c0f2fd1-kube-api-access-qbkhs\") pod \"97de5484-cf46-42f5-9b58-2cac1c0f2fd1\" (UID: \"97de5484-cf46-42f5-9b58-2cac1c0f2fd1\") " Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.343202 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97de5484-cf46-42f5-9b58-2cac1c0f2fd1-catalog-content\") pod \"97de5484-cf46-42f5-9b58-2cac1c0f2fd1\" (UID: \"97de5484-cf46-42f5-9b58-2cac1c0f2fd1\") " Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.343368 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97de5484-cf46-42f5-9b58-2cac1c0f2fd1-utilities\") pod \"97de5484-cf46-42f5-9b58-2cac1c0f2fd1\" (UID: \"97de5484-cf46-42f5-9b58-2cac1c0f2fd1\") " Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.344625 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97de5484-cf46-42f5-9b58-2cac1c0f2fd1-utilities" (OuterVolumeSpecName: "utilities") pod "97de5484-cf46-42f5-9b58-2cac1c0f2fd1" (UID: "97de5484-cf46-42f5-9b58-2cac1c0f2fd1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.349559 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97de5484-cf46-42f5-9b58-2cac1c0f2fd1-kube-api-access-qbkhs" (OuterVolumeSpecName: "kube-api-access-qbkhs") pod "97de5484-cf46-42f5-9b58-2cac1c0f2fd1" (UID: "97de5484-cf46-42f5-9b58-2cac1c0f2fd1"). InnerVolumeSpecName "kube-api-access-qbkhs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.366129 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97de5484-cf46-42f5-9b58-2cac1c0f2fd1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "97de5484-cf46-42f5-9b58-2cac1c0f2fd1" (UID: "97de5484-cf46-42f5-9b58-2cac1c0f2fd1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.451941 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97de5484-cf46-42f5-9b58-2cac1c0f2fd1-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.452010 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qbkhs\" (UniqueName: \"kubernetes.io/projected/97de5484-cf46-42f5-9b58-2cac1c0f2fd1-kube-api-access-qbkhs\") on node \"crc\" DevicePath \"\"" Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.452041 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97de5484-cf46-42f5-9b58-2cac1c0f2fd1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.705282 4838 generic.go:334] "Generic (PLEG): container finished" podID="97de5484-cf46-42f5-9b58-2cac1c0f2fd1" containerID="3c65baec0083bcbd5d6c0d53f8d9ead9e151d49774d526afc10e2e3724d6e5cf" exitCode=0 Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.705342 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7cq8" event={"ID":"97de5484-cf46-42f5-9b58-2cac1c0f2fd1","Type":"ContainerDied","Data":"3c65baec0083bcbd5d6c0d53f8d9ead9e151d49774d526afc10e2e3724d6e5cf"} Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.705391 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7cq8" event={"ID":"97de5484-cf46-42f5-9b58-2cac1c0f2fd1","Type":"ContainerDied","Data":"8aa46d2b7d16bba0a3efac54b21b85a953eb0ca8f172b7c1a17eec3677986968"} Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.705416 4838 scope.go:117] "RemoveContainer" containerID="3c65baec0083bcbd5d6c0d53f8d9ead9e151d49774d526afc10e2e3724d6e5cf" Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.705590 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b7cq8" Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.740793 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b7cq8"] Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.745313 4838 scope.go:117] "RemoveContainer" containerID="91c478b2f7aefc9dcd61cdc7e901dede516afe6de49a44d971e70a42b9d82ea9" Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.756319 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-b7cq8"] Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.792622 4838 scope.go:117] "RemoveContainer" containerID="9e279a8cd6cd7d9b533560a6b97c251a1faf00058c7a7bdb132eeabc4fefa784" Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.844494 4838 scope.go:117] "RemoveContainer" containerID="3c65baec0083bcbd5d6c0d53f8d9ead9e151d49774d526afc10e2e3724d6e5cf" Nov 28 10:55:58 crc kubenswrapper[4838]: E1128 10:55:58.845681 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c65baec0083bcbd5d6c0d53f8d9ead9e151d49774d526afc10e2e3724d6e5cf\": container with ID starting with 3c65baec0083bcbd5d6c0d53f8d9ead9e151d49774d526afc10e2e3724d6e5cf not found: ID does not exist" containerID="3c65baec0083bcbd5d6c0d53f8d9ead9e151d49774d526afc10e2e3724d6e5cf" Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.845841 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c65baec0083bcbd5d6c0d53f8d9ead9e151d49774d526afc10e2e3724d6e5cf"} err="failed to get container status \"3c65baec0083bcbd5d6c0d53f8d9ead9e151d49774d526afc10e2e3724d6e5cf\": rpc error: code = NotFound desc = could not find container \"3c65baec0083bcbd5d6c0d53f8d9ead9e151d49774d526afc10e2e3724d6e5cf\": container with ID starting with 3c65baec0083bcbd5d6c0d53f8d9ead9e151d49774d526afc10e2e3724d6e5cf not found: ID does not exist" Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.846073 4838 scope.go:117] "RemoveContainer" containerID="91c478b2f7aefc9dcd61cdc7e901dede516afe6de49a44d971e70a42b9d82ea9" Nov 28 10:55:58 crc kubenswrapper[4838]: E1128 10:55:58.846917 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91c478b2f7aefc9dcd61cdc7e901dede516afe6de49a44d971e70a42b9d82ea9\": container with ID starting with 91c478b2f7aefc9dcd61cdc7e901dede516afe6de49a44d971e70a42b9d82ea9 not found: ID does not exist" containerID="91c478b2f7aefc9dcd61cdc7e901dede516afe6de49a44d971e70a42b9d82ea9" Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.846998 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91c478b2f7aefc9dcd61cdc7e901dede516afe6de49a44d971e70a42b9d82ea9"} err="failed to get container status \"91c478b2f7aefc9dcd61cdc7e901dede516afe6de49a44d971e70a42b9d82ea9\": rpc error: code = NotFound desc = could not find container \"91c478b2f7aefc9dcd61cdc7e901dede516afe6de49a44d971e70a42b9d82ea9\": container with ID starting with 91c478b2f7aefc9dcd61cdc7e901dede516afe6de49a44d971e70a42b9d82ea9 not found: ID does not exist" Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.847040 4838 scope.go:117] "RemoveContainer" containerID="9e279a8cd6cd7d9b533560a6b97c251a1faf00058c7a7bdb132eeabc4fefa784" Nov 28 10:55:58 crc kubenswrapper[4838]: E1128 10:55:58.847381 4838 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"9e279a8cd6cd7d9b533560a6b97c251a1faf00058c7a7bdb132eeabc4fefa784\": container with ID starting with 9e279a8cd6cd7d9b533560a6b97c251a1faf00058c7a7bdb132eeabc4fefa784 not found: ID does not exist" containerID="9e279a8cd6cd7d9b533560a6b97c251a1faf00058c7a7bdb132eeabc4fefa784" Nov 28 10:55:58 crc kubenswrapper[4838]: I1128 10:55:58.847409 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e279a8cd6cd7d9b533560a6b97c251a1faf00058c7a7bdb132eeabc4fefa784"} err="failed to get container status \"9e279a8cd6cd7d9b533560a6b97c251a1faf00058c7a7bdb132eeabc4fefa784\": rpc error: code = NotFound desc = could not find container \"9e279a8cd6cd7d9b533560a6b97c251a1faf00058c7a7bdb132eeabc4fefa784\": container with ID starting with 9e279a8cd6cd7d9b533560a6b97c251a1faf00058c7a7bdb132eeabc4fefa784 not found: ID does not exist" Nov 28 10:56:00 crc kubenswrapper[4838]: I1128 10:56:00.578114 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97de5484-cf46-42f5-9b58-2cac1c0f2fd1" path="/var/lib/kubelet/pods/97de5484-cf46-42f5-9b58-2cac1c0f2fd1/volumes" Nov 28 10:56:18 crc kubenswrapper[4838]: I1128 10:56:18.799074 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tpxjj"] Nov 28 10:56:18 crc kubenswrapper[4838]: E1128 10:56:18.800145 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97de5484-cf46-42f5-9b58-2cac1c0f2fd1" containerName="extract-utilities" Nov 28 10:56:18 crc kubenswrapper[4838]: I1128 10:56:18.800161 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="97de5484-cf46-42f5-9b58-2cac1c0f2fd1" containerName="extract-utilities" Nov 28 10:56:18 crc kubenswrapper[4838]: E1128 10:56:18.800183 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97de5484-cf46-42f5-9b58-2cac1c0f2fd1" containerName="extract-content" Nov 28 10:56:18 crc kubenswrapper[4838]: I1128 10:56:18.800191 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="97de5484-cf46-42f5-9b58-2cac1c0f2fd1" containerName="extract-content" Nov 28 10:56:18 crc kubenswrapper[4838]: E1128 10:56:18.800206 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97de5484-cf46-42f5-9b58-2cac1c0f2fd1" containerName="registry-server" Nov 28 10:56:18 crc kubenswrapper[4838]: I1128 10:56:18.800214 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="97de5484-cf46-42f5-9b58-2cac1c0f2fd1" containerName="registry-server" Nov 28 10:56:18 crc kubenswrapper[4838]: I1128 10:56:18.800442 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="97de5484-cf46-42f5-9b58-2cac1c0f2fd1" containerName="registry-server" Nov 28 10:56:18 crc kubenswrapper[4838]: I1128 10:56:18.802138 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tpxjj" Nov 28 10:56:18 crc kubenswrapper[4838]: I1128 10:56:18.814056 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tpxjj"] Nov 28 10:56:18 crc kubenswrapper[4838]: I1128 10:56:18.937232 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/146a1492-d101-403a-a26e-628be3574654-catalog-content\") pod \"certified-operators-tpxjj\" (UID: \"146a1492-d101-403a-a26e-628be3574654\") " pod="openshift-marketplace/certified-operators-tpxjj" Nov 28 10:56:18 crc kubenswrapper[4838]: I1128 10:56:18.937378 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6nrrl\" (UniqueName: \"kubernetes.io/projected/146a1492-d101-403a-a26e-628be3574654-kube-api-access-6nrrl\") pod \"certified-operators-tpxjj\" (UID: \"146a1492-d101-403a-a26e-628be3574654\") " pod="openshift-marketplace/certified-operators-tpxjj" Nov 28 10:56:18 crc kubenswrapper[4838]: I1128 10:56:18.937491 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/146a1492-d101-403a-a26e-628be3574654-utilities\") pod \"certified-operators-tpxjj\" (UID: \"146a1492-d101-403a-a26e-628be3574654\") " pod="openshift-marketplace/certified-operators-tpxjj" Nov 28 10:56:19 crc kubenswrapper[4838]: I1128 10:56:19.040360 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/146a1492-d101-403a-a26e-628be3574654-catalog-content\") pod \"certified-operators-tpxjj\" (UID: \"146a1492-d101-403a-a26e-628be3574654\") " pod="openshift-marketplace/certified-operators-tpxjj" Nov 28 10:56:19 crc kubenswrapper[4838]: I1128 10:56:19.041036 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6nrrl\" (UniqueName: \"kubernetes.io/projected/146a1492-d101-403a-a26e-628be3574654-kube-api-access-6nrrl\") pod \"certified-operators-tpxjj\" (UID: \"146a1492-d101-403a-a26e-628be3574654\") " pod="openshift-marketplace/certified-operators-tpxjj" Nov 28 10:56:19 crc kubenswrapper[4838]: I1128 10:56:19.041154 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/146a1492-d101-403a-a26e-628be3574654-catalog-content\") pod \"certified-operators-tpxjj\" (UID: \"146a1492-d101-403a-a26e-628be3574654\") " pod="openshift-marketplace/certified-operators-tpxjj" Nov 28 10:56:19 crc kubenswrapper[4838]: I1128 10:56:19.042191 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/146a1492-d101-403a-a26e-628be3574654-utilities\") pod \"certified-operators-tpxjj\" (UID: \"146a1492-d101-403a-a26e-628be3574654\") " pod="openshift-marketplace/certified-operators-tpxjj" Nov 28 10:56:19 crc kubenswrapper[4838]: I1128 10:56:19.046229 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/146a1492-d101-403a-a26e-628be3574654-utilities\") pod \"certified-operators-tpxjj\" (UID: \"146a1492-d101-403a-a26e-628be3574654\") " pod="openshift-marketplace/certified-operators-tpxjj" Nov 28 10:56:19 crc kubenswrapper[4838]: I1128 10:56:19.062604 4838 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-6nrrl\" (UniqueName: \"kubernetes.io/projected/146a1492-d101-403a-a26e-628be3574654-kube-api-access-6nrrl\") pod \"certified-operators-tpxjj\" (UID: \"146a1492-d101-403a-a26e-628be3574654\") " pod="openshift-marketplace/certified-operators-tpxjj" Nov 28 10:56:19 crc kubenswrapper[4838]: I1128 10:56:19.139461 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tpxjj" Nov 28 10:56:19 crc kubenswrapper[4838]: I1128 10:56:19.619878 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tpxjj"] Nov 28 10:56:19 crc kubenswrapper[4838]: I1128 10:56:19.946439 4838 generic.go:334] "Generic (PLEG): container finished" podID="146a1492-d101-403a-a26e-628be3574654" containerID="773853d6af3cae59d8300e7ede0f98257bdc20b3c567c12084df00c146f7795a" exitCode=0 Nov 28 10:56:19 crc kubenswrapper[4838]: I1128 10:56:19.946584 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tpxjj" event={"ID":"146a1492-d101-403a-a26e-628be3574654","Type":"ContainerDied","Data":"773853d6af3cae59d8300e7ede0f98257bdc20b3c567c12084df00c146f7795a"} Nov 28 10:56:19 crc kubenswrapper[4838]: I1128 10:56:19.946749 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tpxjj" event={"ID":"146a1492-d101-403a-a26e-628be3574654","Type":"ContainerStarted","Data":"b9158e1f4e3f2b4cde484b4c21067b46b3299bca243ca84f9872c31abf772f06"} Nov 28 10:56:21 crc kubenswrapper[4838]: I1128 10:56:21.979187 4838 generic.go:334] "Generic (PLEG): container finished" podID="146a1492-d101-403a-a26e-628be3574654" containerID="ab34e76575e12132588cb3c2df77ecd8a8d2de59a476cb9d5e4fd50568f90aeb" exitCode=0 Nov 28 10:56:21 crc kubenswrapper[4838]: I1128 10:56:21.979285 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tpxjj" event={"ID":"146a1492-d101-403a-a26e-628be3574654","Type":"ContainerDied","Data":"ab34e76575e12132588cb3c2df77ecd8a8d2de59a476cb9d5e4fd50568f90aeb"} Nov 28 10:56:22 crc kubenswrapper[4838]: I1128 10:56:22.992993 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tpxjj" event={"ID":"146a1492-d101-403a-a26e-628be3574654","Type":"ContainerStarted","Data":"dbf96c2707248f5d51cfc4b1f496ea854b4e5db8f2a26ec4a02309974aa0690b"} Nov 28 10:56:23 crc kubenswrapper[4838]: I1128 10:56:23.026054 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tpxjj" podStartSLOduration=2.379912572 podStartE2EDuration="5.026036119s" podCreationTimestamp="2025-11-28 10:56:18 +0000 UTC" firstStartedPulling="2025-11-28 10:56:19.94784238 +0000 UTC m=+3551.646816550" lastFinishedPulling="2025-11-28 10:56:22.593965927 +0000 UTC m=+3554.292940097" observedRunningTime="2025-11-28 10:56:23.023729956 +0000 UTC m=+3554.722704126" watchObservedRunningTime="2025-11-28 10:56:23.026036119 +0000 UTC m=+3554.725010289" Nov 28 10:56:29 crc kubenswrapper[4838]: I1128 10:56:29.140421 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tpxjj" Nov 28 10:56:29 crc kubenswrapper[4838]: I1128 10:56:29.141259 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tpxjj" Nov 28 10:56:29 crc kubenswrapper[4838]: I1128 10:56:29.209971 4838 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tpxjj" Nov 28 10:56:30 crc kubenswrapper[4838]: I1128 10:56:30.143796 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tpxjj" Nov 28 10:56:30 crc kubenswrapper[4838]: I1128 10:56:30.208879 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tpxjj"] Nov 28 10:56:31 crc kubenswrapper[4838]: I1128 10:56:31.878378 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4nmbp"] Nov 28 10:56:31 crc kubenswrapper[4838]: I1128 10:56:31.884178 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4nmbp" Nov 28 10:56:31 crc kubenswrapper[4838]: I1128 10:56:31.894657 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4nmbp"] Nov 28 10:56:31 crc kubenswrapper[4838]: I1128 10:56:31.981199 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24fbdd42-6c0d-430a-81cd-06598d37810c-catalog-content\") pod \"community-operators-4nmbp\" (UID: \"24fbdd42-6c0d-430a-81cd-06598d37810c\") " pod="openshift-marketplace/community-operators-4nmbp" Nov 28 10:56:31 crc kubenswrapper[4838]: I1128 10:56:31.981332 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24fbdd42-6c0d-430a-81cd-06598d37810c-utilities\") pod \"community-operators-4nmbp\" (UID: \"24fbdd42-6c0d-430a-81cd-06598d37810c\") " pod="openshift-marketplace/community-operators-4nmbp" Nov 28 10:56:31 crc kubenswrapper[4838]: I1128 10:56:31.981540 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvhlc\" (UniqueName: \"kubernetes.io/projected/24fbdd42-6c0d-430a-81cd-06598d37810c-kube-api-access-tvhlc\") pod \"community-operators-4nmbp\" (UID: \"24fbdd42-6c0d-430a-81cd-06598d37810c\") " pod="openshift-marketplace/community-operators-4nmbp" Nov 28 10:56:32 crc kubenswrapper[4838]: I1128 10:56:32.084251 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24fbdd42-6c0d-430a-81cd-06598d37810c-utilities\") pod \"community-operators-4nmbp\" (UID: \"24fbdd42-6c0d-430a-81cd-06598d37810c\") " pod="openshift-marketplace/community-operators-4nmbp" Nov 28 10:56:32 crc kubenswrapper[4838]: I1128 10:56:32.084424 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvhlc\" (UniqueName: \"kubernetes.io/projected/24fbdd42-6c0d-430a-81cd-06598d37810c-kube-api-access-tvhlc\") pod \"community-operators-4nmbp\" (UID: \"24fbdd42-6c0d-430a-81cd-06598d37810c\") " pod="openshift-marketplace/community-operators-4nmbp" Nov 28 10:56:32 crc kubenswrapper[4838]: I1128 10:56:32.084891 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24fbdd42-6c0d-430a-81cd-06598d37810c-catalog-content\") pod \"community-operators-4nmbp\" (UID: \"24fbdd42-6c0d-430a-81cd-06598d37810c\") " pod="openshift-marketplace/community-operators-4nmbp" Nov 28 10:56:32 crc kubenswrapper[4838]: I1128 10:56:32.085464 4838 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24fbdd42-6c0d-430a-81cd-06598d37810c-catalog-content\") pod \"community-operators-4nmbp\" (UID: \"24fbdd42-6c0d-430a-81cd-06598d37810c\") " pod="openshift-marketplace/community-operators-4nmbp" Nov 28 10:56:32 crc kubenswrapper[4838]: I1128 10:56:32.085585 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24fbdd42-6c0d-430a-81cd-06598d37810c-utilities\") pod \"community-operators-4nmbp\" (UID: \"24fbdd42-6c0d-430a-81cd-06598d37810c\") " pod="openshift-marketplace/community-operators-4nmbp" Nov 28 10:56:32 crc kubenswrapper[4838]: I1128 10:56:32.089184 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tpxjj" podUID="146a1492-d101-403a-a26e-628be3574654" containerName="registry-server" containerID="cri-o://dbf96c2707248f5d51cfc4b1f496ea854b4e5db8f2a26ec4a02309974aa0690b" gracePeriod=2 Nov 28 10:56:32 crc kubenswrapper[4838]: I1128 10:56:32.111406 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvhlc\" (UniqueName: \"kubernetes.io/projected/24fbdd42-6c0d-430a-81cd-06598d37810c-kube-api-access-tvhlc\") pod \"community-operators-4nmbp\" (UID: \"24fbdd42-6c0d-430a-81cd-06598d37810c\") " pod="openshift-marketplace/community-operators-4nmbp" Nov 28 10:56:32 crc kubenswrapper[4838]: I1128 10:56:32.221951 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4nmbp" Nov 28 10:56:32 crc kubenswrapper[4838]: I1128 10:56:32.992052 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4nmbp"] Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.068131 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tpxjj" Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.115734 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4nmbp" event={"ID":"24fbdd42-6c0d-430a-81cd-06598d37810c","Type":"ContainerStarted","Data":"e44162fb027267cf2e4aa1c85c962a0ac2e30dcb2bb41b95da7f0e2f4bdb230c"} Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.133774 4838 generic.go:334] "Generic (PLEG): container finished" podID="146a1492-d101-403a-a26e-628be3574654" containerID="dbf96c2707248f5d51cfc4b1f496ea854b4e5db8f2a26ec4a02309974aa0690b" exitCode=0 Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.134018 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tpxjj" Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.134057 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tpxjj" event={"ID":"146a1492-d101-403a-a26e-628be3574654","Type":"ContainerDied","Data":"dbf96c2707248f5d51cfc4b1f496ea854b4e5db8f2a26ec4a02309974aa0690b"} Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.134362 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tpxjj" event={"ID":"146a1492-d101-403a-a26e-628be3574654","Type":"ContainerDied","Data":"b9158e1f4e3f2b4cde484b4c21067b46b3299bca243ca84f9872c31abf772f06"} Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.134411 4838 scope.go:117] "RemoveContainer" containerID="dbf96c2707248f5d51cfc4b1f496ea854b4e5db8f2a26ec4a02309974aa0690b" Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.195330 4838 scope.go:117] "RemoveContainer" containerID="ab34e76575e12132588cb3c2df77ecd8a8d2de59a476cb9d5e4fd50568f90aeb" Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.218606 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/146a1492-d101-403a-a26e-628be3574654-catalog-content\") pod \"146a1492-d101-403a-a26e-628be3574654\" (UID: \"146a1492-d101-403a-a26e-628be3574654\") " Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.218698 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/146a1492-d101-403a-a26e-628be3574654-utilities\") pod \"146a1492-d101-403a-a26e-628be3574654\" (UID: \"146a1492-d101-403a-a26e-628be3574654\") " Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.218782 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6nrrl\" (UniqueName: \"kubernetes.io/projected/146a1492-d101-403a-a26e-628be3574654-kube-api-access-6nrrl\") pod \"146a1492-d101-403a-a26e-628be3574654\" (UID: \"146a1492-d101-403a-a26e-628be3574654\") " Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.220001 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/146a1492-d101-403a-a26e-628be3574654-utilities" (OuterVolumeSpecName: "utilities") pod "146a1492-d101-403a-a26e-628be3574654" (UID: "146a1492-d101-403a-a26e-628be3574654"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.229224 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/146a1492-d101-403a-a26e-628be3574654-kube-api-access-6nrrl" (OuterVolumeSpecName: "kube-api-access-6nrrl") pod "146a1492-d101-403a-a26e-628be3574654" (UID: "146a1492-d101-403a-a26e-628be3574654"). InnerVolumeSpecName "kube-api-access-6nrrl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.268081 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/146a1492-d101-403a-a26e-628be3574654-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "146a1492-d101-403a-a26e-628be3574654" (UID: "146a1492-d101-403a-a26e-628be3574654"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.321648 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/146a1492-d101-403a-a26e-628be3574654-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.321678 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/146a1492-d101-403a-a26e-628be3574654-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.321690 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6nrrl\" (UniqueName: \"kubernetes.io/projected/146a1492-d101-403a-a26e-628be3574654-kube-api-access-6nrrl\") on node \"crc\" DevicePath \"\"" Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.321653 4838 scope.go:117] "RemoveContainer" containerID="773853d6af3cae59d8300e7ede0f98257bdc20b3c567c12084df00c146f7795a" Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.380205 4838 scope.go:117] "RemoveContainer" containerID="dbf96c2707248f5d51cfc4b1f496ea854b4e5db8f2a26ec4a02309974aa0690b" Nov 28 10:56:33 crc kubenswrapper[4838]: E1128 10:56:33.380836 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dbf96c2707248f5d51cfc4b1f496ea854b4e5db8f2a26ec4a02309974aa0690b\": container with ID starting with dbf96c2707248f5d51cfc4b1f496ea854b4e5db8f2a26ec4a02309974aa0690b not found: ID does not exist" containerID="dbf96c2707248f5d51cfc4b1f496ea854b4e5db8f2a26ec4a02309974aa0690b" Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.380980 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbf96c2707248f5d51cfc4b1f496ea854b4e5db8f2a26ec4a02309974aa0690b"} err="failed to get container status \"dbf96c2707248f5d51cfc4b1f496ea854b4e5db8f2a26ec4a02309974aa0690b\": rpc error: code = NotFound desc = could not find container \"dbf96c2707248f5d51cfc4b1f496ea854b4e5db8f2a26ec4a02309974aa0690b\": container with ID starting with dbf96c2707248f5d51cfc4b1f496ea854b4e5db8f2a26ec4a02309974aa0690b not found: ID does not exist" Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.381097 4838 scope.go:117] "RemoveContainer" containerID="ab34e76575e12132588cb3c2df77ecd8a8d2de59a476cb9d5e4fd50568f90aeb" Nov 28 10:56:33 crc kubenswrapper[4838]: E1128 10:56:33.381465 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab34e76575e12132588cb3c2df77ecd8a8d2de59a476cb9d5e4fd50568f90aeb\": container with ID starting with ab34e76575e12132588cb3c2df77ecd8a8d2de59a476cb9d5e4fd50568f90aeb not found: ID does not exist" containerID="ab34e76575e12132588cb3c2df77ecd8a8d2de59a476cb9d5e4fd50568f90aeb" Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.381562 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab34e76575e12132588cb3c2df77ecd8a8d2de59a476cb9d5e4fd50568f90aeb"} err="failed to get container status \"ab34e76575e12132588cb3c2df77ecd8a8d2de59a476cb9d5e4fd50568f90aeb\": rpc error: code = NotFound desc = could not find container \"ab34e76575e12132588cb3c2df77ecd8a8d2de59a476cb9d5e4fd50568f90aeb\": container with ID starting with ab34e76575e12132588cb3c2df77ecd8a8d2de59a476cb9d5e4fd50568f90aeb not found: ID does not exist" Nov 28 10:56:33 crc 
kubenswrapper[4838]: I1128 10:56:33.381637 4838 scope.go:117] "RemoveContainer" containerID="773853d6af3cae59d8300e7ede0f98257bdc20b3c567c12084df00c146f7795a" Nov 28 10:56:33 crc kubenswrapper[4838]: E1128 10:56:33.382309 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"773853d6af3cae59d8300e7ede0f98257bdc20b3c567c12084df00c146f7795a\": container with ID starting with 773853d6af3cae59d8300e7ede0f98257bdc20b3c567c12084df00c146f7795a not found: ID does not exist" containerID="773853d6af3cae59d8300e7ede0f98257bdc20b3c567c12084df00c146f7795a" Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.382415 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"773853d6af3cae59d8300e7ede0f98257bdc20b3c567c12084df00c146f7795a"} err="failed to get container status \"773853d6af3cae59d8300e7ede0f98257bdc20b3c567c12084df00c146f7795a\": rpc error: code = NotFound desc = could not find container \"773853d6af3cae59d8300e7ede0f98257bdc20b3c567c12084df00c146f7795a\": container with ID starting with 773853d6af3cae59d8300e7ede0f98257bdc20b3c567c12084df00c146f7795a not found: ID does not exist" Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.481436 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tpxjj"] Nov 28 10:56:33 crc kubenswrapper[4838]: I1128 10:56:33.490967 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tpxjj"] Nov 28 10:56:34 crc kubenswrapper[4838]: I1128 10:56:34.150268 4838 generic.go:334] "Generic (PLEG): container finished" podID="24fbdd42-6c0d-430a-81cd-06598d37810c" containerID="febebb7885f977212c00069853682b7c66c037e65c944565e5b1371b0aea25d8" exitCode=0 Nov 28 10:56:34 crc kubenswrapper[4838]: I1128 10:56:34.150344 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4nmbp" event={"ID":"24fbdd42-6c0d-430a-81cd-06598d37810c","Type":"ContainerDied","Data":"febebb7885f977212c00069853682b7c66c037e65c944565e5b1371b0aea25d8"} Nov 28 10:56:34 crc kubenswrapper[4838]: I1128 10:56:34.573467 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="146a1492-d101-403a-a26e-628be3574654" path="/var/lib/kubelet/pods/146a1492-d101-403a-a26e-628be3574654/volumes" Nov 28 10:56:36 crc kubenswrapper[4838]: I1128 10:56:36.181147 4838 generic.go:334] "Generic (PLEG): container finished" podID="24fbdd42-6c0d-430a-81cd-06598d37810c" containerID="1f3f716b4aa53cfe8627294a61a1b4e66b0001bb5cd276d7faaa66627b940a85" exitCode=0 Nov 28 10:56:36 crc kubenswrapper[4838]: I1128 10:56:36.181249 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4nmbp" event={"ID":"24fbdd42-6c0d-430a-81cd-06598d37810c","Type":"ContainerDied","Data":"1f3f716b4aa53cfe8627294a61a1b4e66b0001bb5cd276d7faaa66627b940a85"} Nov 28 10:56:37 crc kubenswrapper[4838]: I1128 10:56:37.193066 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4nmbp" event={"ID":"24fbdd42-6c0d-430a-81cd-06598d37810c","Type":"ContainerStarted","Data":"1c3cf854ac180b1f9d6666e20611e052c734401af05bfbb7b7713cdee186bcfe"} Nov 28 10:56:37 crc kubenswrapper[4838]: I1128 10:56:37.221967 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4nmbp" podStartSLOduration=3.574872193 podStartE2EDuration="6.221944576s" 
podCreationTimestamp="2025-11-28 10:56:31 +0000 UTC" firstStartedPulling="2025-11-28 10:56:34.152996308 +0000 UTC m=+3565.851970488" lastFinishedPulling="2025-11-28 10:56:36.800068701 +0000 UTC m=+3568.499042871" observedRunningTime="2025-11-28 10:56:37.212868139 +0000 UTC m=+3568.911842339" watchObservedRunningTime="2025-11-28 10:56:37.221944576 +0000 UTC m=+3568.920918776" Nov 28 10:56:42 crc kubenswrapper[4838]: I1128 10:56:42.222384 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4nmbp" Nov 28 10:56:42 crc kubenswrapper[4838]: I1128 10:56:42.223071 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4nmbp" Nov 28 10:56:42 crc kubenswrapper[4838]: I1128 10:56:42.312292 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4nmbp" Nov 28 10:56:42 crc kubenswrapper[4838]: I1128 10:56:42.394847 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4nmbp" Nov 28 10:56:42 crc kubenswrapper[4838]: I1128 10:56:42.555295 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4nmbp"] Nov 28 10:56:44 crc kubenswrapper[4838]: I1128 10:56:44.259168 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4nmbp" podUID="24fbdd42-6c0d-430a-81cd-06598d37810c" containerName="registry-server" containerID="cri-o://1c3cf854ac180b1f9d6666e20611e052c734401af05bfbb7b7713cdee186bcfe" gracePeriod=2 Nov 28 10:56:44 crc kubenswrapper[4838]: I1128 10:56:44.807348 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4nmbp" Nov 28 10:56:44 crc kubenswrapper[4838]: I1128 10:56:44.946395 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24fbdd42-6c0d-430a-81cd-06598d37810c-utilities\") pod \"24fbdd42-6c0d-430a-81cd-06598d37810c\" (UID: \"24fbdd42-6c0d-430a-81cd-06598d37810c\") " Nov 28 10:56:44 crc kubenswrapper[4838]: I1128 10:56:44.946459 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24fbdd42-6c0d-430a-81cd-06598d37810c-catalog-content\") pod \"24fbdd42-6c0d-430a-81cd-06598d37810c\" (UID: \"24fbdd42-6c0d-430a-81cd-06598d37810c\") " Nov 28 10:56:44 crc kubenswrapper[4838]: I1128 10:56:44.946556 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tvhlc\" (UniqueName: \"kubernetes.io/projected/24fbdd42-6c0d-430a-81cd-06598d37810c-kube-api-access-tvhlc\") pod \"24fbdd42-6c0d-430a-81cd-06598d37810c\" (UID: \"24fbdd42-6c0d-430a-81cd-06598d37810c\") " Nov 28 10:56:44 crc kubenswrapper[4838]: I1128 10:56:44.947464 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24fbdd42-6c0d-430a-81cd-06598d37810c-utilities" (OuterVolumeSpecName: "utilities") pod "24fbdd42-6c0d-430a-81cd-06598d37810c" (UID: "24fbdd42-6c0d-430a-81cd-06598d37810c"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:56:44 crc kubenswrapper[4838]: I1128 10:56:44.952350 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24fbdd42-6c0d-430a-81cd-06598d37810c-kube-api-access-tvhlc" (OuterVolumeSpecName: "kube-api-access-tvhlc") pod "24fbdd42-6c0d-430a-81cd-06598d37810c" (UID: "24fbdd42-6c0d-430a-81cd-06598d37810c"). InnerVolumeSpecName "kube-api-access-tvhlc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 10:56:44 crc kubenswrapper[4838]: I1128 10:56:44.999802 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24fbdd42-6c0d-430a-81cd-06598d37810c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "24fbdd42-6c0d-430a-81cd-06598d37810c" (UID: "24fbdd42-6c0d-430a-81cd-06598d37810c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 10:56:45 crc kubenswrapper[4838]: I1128 10:56:45.048818 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24fbdd42-6c0d-430a-81cd-06598d37810c-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 10:56:45 crc kubenswrapper[4838]: I1128 10:56:45.048872 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24fbdd42-6c0d-430a-81cd-06598d37810c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 10:56:45 crc kubenswrapper[4838]: I1128 10:56:45.048891 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tvhlc\" (UniqueName: \"kubernetes.io/projected/24fbdd42-6c0d-430a-81cd-06598d37810c-kube-api-access-tvhlc\") on node \"crc\" DevicePath \"\"" Nov 28 10:56:45 crc kubenswrapper[4838]: I1128 10:56:45.272173 4838 generic.go:334] "Generic (PLEG): container finished" podID="24fbdd42-6c0d-430a-81cd-06598d37810c" containerID="1c3cf854ac180b1f9d6666e20611e052c734401af05bfbb7b7713cdee186bcfe" exitCode=0 Nov 28 10:56:45 crc kubenswrapper[4838]: I1128 10:56:45.272212 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4nmbp" event={"ID":"24fbdd42-6c0d-430a-81cd-06598d37810c","Type":"ContainerDied","Data":"1c3cf854ac180b1f9d6666e20611e052c734401af05bfbb7b7713cdee186bcfe"} Nov 28 10:56:45 crc kubenswrapper[4838]: I1128 10:56:45.272252 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4nmbp" event={"ID":"24fbdd42-6c0d-430a-81cd-06598d37810c","Type":"ContainerDied","Data":"e44162fb027267cf2e4aa1c85c962a0ac2e30dcb2bb41b95da7f0e2f4bdb230c"} Nov 28 10:56:45 crc kubenswrapper[4838]: I1128 10:56:45.272256 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4nmbp" Nov 28 10:56:45 crc kubenswrapper[4838]: I1128 10:56:45.272268 4838 scope.go:117] "RemoveContainer" containerID="1c3cf854ac180b1f9d6666e20611e052c734401af05bfbb7b7713cdee186bcfe" Nov 28 10:56:45 crc kubenswrapper[4838]: I1128 10:56:45.297394 4838 scope.go:117] "RemoveContainer" containerID="1f3f716b4aa53cfe8627294a61a1b4e66b0001bb5cd276d7faaa66627b940a85" Nov 28 10:56:45 crc kubenswrapper[4838]: I1128 10:56:45.312318 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4nmbp"] Nov 28 10:56:45 crc kubenswrapper[4838]: I1128 10:56:45.321449 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4nmbp"] Nov 28 10:56:45 crc kubenswrapper[4838]: I1128 10:56:45.341321 4838 scope.go:117] "RemoveContainer" containerID="febebb7885f977212c00069853682b7c66c037e65c944565e5b1371b0aea25d8" Nov 28 10:56:45 crc kubenswrapper[4838]: I1128 10:56:45.384832 4838 scope.go:117] "RemoveContainer" containerID="1c3cf854ac180b1f9d6666e20611e052c734401af05bfbb7b7713cdee186bcfe" Nov 28 10:56:45 crc kubenswrapper[4838]: E1128 10:56:45.385357 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c3cf854ac180b1f9d6666e20611e052c734401af05bfbb7b7713cdee186bcfe\": container with ID starting with 1c3cf854ac180b1f9d6666e20611e052c734401af05bfbb7b7713cdee186bcfe not found: ID does not exist" containerID="1c3cf854ac180b1f9d6666e20611e052c734401af05bfbb7b7713cdee186bcfe" Nov 28 10:56:45 crc kubenswrapper[4838]: I1128 10:56:45.385399 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c3cf854ac180b1f9d6666e20611e052c734401af05bfbb7b7713cdee186bcfe"} err="failed to get container status \"1c3cf854ac180b1f9d6666e20611e052c734401af05bfbb7b7713cdee186bcfe\": rpc error: code = NotFound desc = could not find container \"1c3cf854ac180b1f9d6666e20611e052c734401af05bfbb7b7713cdee186bcfe\": container with ID starting with 1c3cf854ac180b1f9d6666e20611e052c734401af05bfbb7b7713cdee186bcfe not found: ID does not exist" Nov 28 10:56:45 crc kubenswrapper[4838]: I1128 10:56:45.385422 4838 scope.go:117] "RemoveContainer" containerID="1f3f716b4aa53cfe8627294a61a1b4e66b0001bb5cd276d7faaa66627b940a85" Nov 28 10:56:45 crc kubenswrapper[4838]: E1128 10:56:45.385663 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f3f716b4aa53cfe8627294a61a1b4e66b0001bb5cd276d7faaa66627b940a85\": container with ID starting with 1f3f716b4aa53cfe8627294a61a1b4e66b0001bb5cd276d7faaa66627b940a85 not found: ID does not exist" containerID="1f3f716b4aa53cfe8627294a61a1b4e66b0001bb5cd276d7faaa66627b940a85" Nov 28 10:56:45 crc kubenswrapper[4838]: I1128 10:56:45.385688 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f3f716b4aa53cfe8627294a61a1b4e66b0001bb5cd276d7faaa66627b940a85"} err="failed to get container status \"1f3f716b4aa53cfe8627294a61a1b4e66b0001bb5cd276d7faaa66627b940a85\": rpc error: code = NotFound desc = could not find container \"1f3f716b4aa53cfe8627294a61a1b4e66b0001bb5cd276d7faaa66627b940a85\": container with ID starting with 1f3f716b4aa53cfe8627294a61a1b4e66b0001bb5cd276d7faaa66627b940a85 not found: ID does not exist" Nov 28 10:56:45 crc kubenswrapper[4838]: I1128 10:56:45.385705 4838 scope.go:117] "RemoveContainer" 
containerID="febebb7885f977212c00069853682b7c66c037e65c944565e5b1371b0aea25d8" Nov 28 10:56:45 crc kubenswrapper[4838]: E1128 10:56:45.385935 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"febebb7885f977212c00069853682b7c66c037e65c944565e5b1371b0aea25d8\": container with ID starting with febebb7885f977212c00069853682b7c66c037e65c944565e5b1371b0aea25d8 not found: ID does not exist" containerID="febebb7885f977212c00069853682b7c66c037e65c944565e5b1371b0aea25d8" Nov 28 10:56:45 crc kubenswrapper[4838]: I1128 10:56:45.385984 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"febebb7885f977212c00069853682b7c66c037e65c944565e5b1371b0aea25d8"} err="failed to get container status \"febebb7885f977212c00069853682b7c66c037e65c944565e5b1371b0aea25d8\": rpc error: code = NotFound desc = could not find container \"febebb7885f977212c00069853682b7c66c037e65c944565e5b1371b0aea25d8\": container with ID starting with febebb7885f977212c00069853682b7c66c037e65c944565e5b1371b0aea25d8 not found: ID does not exist" Nov 28 10:56:46 crc kubenswrapper[4838]: I1128 10:56:46.582439 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24fbdd42-6c0d-430a-81cd-06598d37810c" path="/var/lib/kubelet/pods/24fbdd42-6c0d-430a-81cd-06598d37810c/volumes" Nov 28 10:57:23 crc kubenswrapper[4838]: I1128 10:57:23.940843 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:57:23 crc kubenswrapper[4838]: I1128 10:57:23.941661 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:57:53 crc kubenswrapper[4838]: I1128 10:57:53.940345 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:57:53 crc kubenswrapper[4838]: I1128 10:57:53.940947 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:58:23 crc kubenswrapper[4838]: I1128 10:58:23.939651 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 10:58:23 crc kubenswrapper[4838]: I1128 10:58:23.940502 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 10:58:23 crc kubenswrapper[4838]: I1128 10:58:23.940586 4838 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 10:58:23 crc kubenswrapper[4838]: I1128 10:58:23.941624 4838 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"336a1410d5b7b6661dd9229c5864e9b8a009c3cf2d23e82987c1cf12f8cec0b5"} pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 10:58:23 crc kubenswrapper[4838]: I1128 10:58:23.941713 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" containerID="cri-o://336a1410d5b7b6661dd9229c5864e9b8a009c3cf2d23e82987c1cf12f8cec0b5" gracePeriod=600 Nov 28 10:58:24 crc kubenswrapper[4838]: I1128 10:58:24.357210 4838 generic.go:334] "Generic (PLEG): container finished" podID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerID="336a1410d5b7b6661dd9229c5864e9b8a009c3cf2d23e82987c1cf12f8cec0b5" exitCode=0 Nov 28 10:58:24 crc kubenswrapper[4838]: I1128 10:58:24.357739 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerDied","Data":"336a1410d5b7b6661dd9229c5864e9b8a009c3cf2d23e82987c1cf12f8cec0b5"} Nov 28 10:58:24 crc kubenswrapper[4838]: I1128 10:58:24.357791 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerStarted","Data":"616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb"} Nov 28 10:58:24 crc kubenswrapper[4838]: I1128 10:58:24.357809 4838 scope.go:117] "RemoveContainer" containerID="3d80b1b6cf83170cb103f16c70c6a8b3db90d429f22463d02230864c009beb4b" Nov 28 10:59:28 crc kubenswrapper[4838]: I1128 10:59:28.031255 4838 scope.go:117] "RemoveContainer" containerID="67a7b3d6ebe1d6e5474e23ad22300ec84f8a144b1f94fa71bc684024a71ad351" Nov 28 10:59:28 crc kubenswrapper[4838]: I1128 10:59:28.053575 4838 scope.go:117] "RemoveContainer" containerID="1d975085ad4f3ec99a878f8fa1ec1022a1135e14c310b0c83c68d6a9c422fbc8" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.173505 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405460-mn7zq"] Nov 28 11:00:00 crc kubenswrapper[4838]: E1128 11:00:00.174752 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24fbdd42-6c0d-430a-81cd-06598d37810c" containerName="registry-server" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.174777 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="24fbdd42-6c0d-430a-81cd-06598d37810c" containerName="registry-server" Nov 28 11:00:00 crc kubenswrapper[4838]: E1128 11:00:00.174832 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="146a1492-d101-403a-a26e-628be3574654" containerName="extract-utilities" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.174843 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="146a1492-d101-403a-a26e-628be3574654" 
containerName="extract-utilities" Nov 28 11:00:00 crc kubenswrapper[4838]: E1128 11:00:00.174868 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="146a1492-d101-403a-a26e-628be3574654" containerName="registry-server" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.174883 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="146a1492-d101-403a-a26e-628be3574654" containerName="registry-server" Nov 28 11:00:00 crc kubenswrapper[4838]: E1128 11:00:00.174902 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="146a1492-d101-403a-a26e-628be3574654" containerName="extract-content" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.174913 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="146a1492-d101-403a-a26e-628be3574654" containerName="extract-content" Nov 28 11:00:00 crc kubenswrapper[4838]: E1128 11:00:00.174936 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24fbdd42-6c0d-430a-81cd-06598d37810c" containerName="extract-content" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.174947 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="24fbdd42-6c0d-430a-81cd-06598d37810c" containerName="extract-content" Nov 28 11:00:00 crc kubenswrapper[4838]: E1128 11:00:00.174962 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24fbdd42-6c0d-430a-81cd-06598d37810c" containerName="extract-utilities" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.174973 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="24fbdd42-6c0d-430a-81cd-06598d37810c" containerName="extract-utilities" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.175295 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="24fbdd42-6c0d-430a-81cd-06598d37810c" containerName="registry-server" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.175327 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="146a1492-d101-403a-a26e-628be3574654" containerName="registry-server" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.176368 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-mn7zq" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.178640 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.178657 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.190015 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405460-mn7zq"] Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.327691 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dsmf7\" (UniqueName: \"kubernetes.io/projected/b9a63111-f82b-4371-9cdb-c839e961dd7e-kube-api-access-dsmf7\") pod \"collect-profiles-29405460-mn7zq\" (UID: \"b9a63111-f82b-4371-9cdb-c839e961dd7e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-mn7zq" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.327822 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9a63111-f82b-4371-9cdb-c839e961dd7e-secret-volume\") pod \"collect-profiles-29405460-mn7zq\" (UID: \"b9a63111-f82b-4371-9cdb-c839e961dd7e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-mn7zq" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.327888 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9a63111-f82b-4371-9cdb-c839e961dd7e-config-volume\") pod \"collect-profiles-29405460-mn7zq\" (UID: \"b9a63111-f82b-4371-9cdb-c839e961dd7e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-mn7zq" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.429813 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9a63111-f82b-4371-9cdb-c839e961dd7e-secret-volume\") pod \"collect-profiles-29405460-mn7zq\" (UID: \"b9a63111-f82b-4371-9cdb-c839e961dd7e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-mn7zq" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.431076 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9a63111-f82b-4371-9cdb-c839e961dd7e-config-volume\") pod \"collect-profiles-29405460-mn7zq\" (UID: \"b9a63111-f82b-4371-9cdb-c839e961dd7e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-mn7zq" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.431508 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dsmf7\" (UniqueName: \"kubernetes.io/projected/b9a63111-f82b-4371-9cdb-c839e961dd7e-kube-api-access-dsmf7\") pod \"collect-profiles-29405460-mn7zq\" (UID: \"b9a63111-f82b-4371-9cdb-c839e961dd7e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-mn7zq" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.431955 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9a63111-f82b-4371-9cdb-c839e961dd7e-config-volume\") pod 
\"collect-profiles-29405460-mn7zq\" (UID: \"b9a63111-f82b-4371-9cdb-c839e961dd7e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-mn7zq" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.449819 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9a63111-f82b-4371-9cdb-c839e961dd7e-secret-volume\") pod \"collect-profiles-29405460-mn7zq\" (UID: \"b9a63111-f82b-4371-9cdb-c839e961dd7e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-mn7zq" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.453340 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dsmf7\" (UniqueName: \"kubernetes.io/projected/b9a63111-f82b-4371-9cdb-c839e961dd7e-kube-api-access-dsmf7\") pod \"collect-profiles-29405460-mn7zq\" (UID: \"b9a63111-f82b-4371-9cdb-c839e961dd7e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-mn7zq" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.500476 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-mn7zq" Nov 28 11:00:00 crc kubenswrapper[4838]: I1128 11:00:00.990433 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405460-mn7zq"] Nov 28 11:00:01 crc kubenswrapper[4838]: I1128 11:00:01.596116 4838 generic.go:334] "Generic (PLEG): container finished" podID="b9a63111-f82b-4371-9cdb-c839e961dd7e" containerID="012e085818350d5796ad52191bfd1514d075baced248ae8fb4ad685d7c79d13c" exitCode=0 Nov 28 11:00:01 crc kubenswrapper[4838]: I1128 11:00:01.596174 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-mn7zq" event={"ID":"b9a63111-f82b-4371-9cdb-c839e961dd7e","Type":"ContainerDied","Data":"012e085818350d5796ad52191bfd1514d075baced248ae8fb4ad685d7c79d13c"} Nov 28 11:00:01 crc kubenswrapper[4838]: I1128 11:00:01.596414 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-mn7zq" event={"ID":"b9a63111-f82b-4371-9cdb-c839e961dd7e","Type":"ContainerStarted","Data":"ec3428120c9a572e88b5668f8593db993a79f4b0ece4b67d2b2d6d73ce22ea72"} Nov 28 11:00:03 crc kubenswrapper[4838]: I1128 11:00:03.023542 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-mn7zq" Nov 28 11:00:03 crc kubenswrapper[4838]: I1128 11:00:03.202446 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9a63111-f82b-4371-9cdb-c839e961dd7e-config-volume\") pod \"b9a63111-f82b-4371-9cdb-c839e961dd7e\" (UID: \"b9a63111-f82b-4371-9cdb-c839e961dd7e\") " Nov 28 11:00:03 crc kubenswrapper[4838]: I1128 11:00:03.202818 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dsmf7\" (UniqueName: \"kubernetes.io/projected/b9a63111-f82b-4371-9cdb-c839e961dd7e-kube-api-access-dsmf7\") pod \"b9a63111-f82b-4371-9cdb-c839e961dd7e\" (UID: \"b9a63111-f82b-4371-9cdb-c839e961dd7e\") " Nov 28 11:00:03 crc kubenswrapper[4838]: I1128 11:00:03.202862 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9a63111-f82b-4371-9cdb-c839e961dd7e-secret-volume\") pod \"b9a63111-f82b-4371-9cdb-c839e961dd7e\" (UID: \"b9a63111-f82b-4371-9cdb-c839e961dd7e\") " Nov 28 11:00:03 crc kubenswrapper[4838]: I1128 11:00:03.203183 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9a63111-f82b-4371-9cdb-c839e961dd7e-config-volume" (OuterVolumeSpecName: "config-volume") pod "b9a63111-f82b-4371-9cdb-c839e961dd7e" (UID: "b9a63111-f82b-4371-9cdb-c839e961dd7e"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:00:03 crc kubenswrapper[4838]: I1128 11:00:03.203530 4838 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9a63111-f82b-4371-9cdb-c839e961dd7e-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 11:00:03 crc kubenswrapper[4838]: I1128 11:00:03.208823 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9a63111-f82b-4371-9cdb-c839e961dd7e-kube-api-access-dsmf7" (OuterVolumeSpecName: "kube-api-access-dsmf7") pod "b9a63111-f82b-4371-9cdb-c839e961dd7e" (UID: "b9a63111-f82b-4371-9cdb-c839e961dd7e"). InnerVolumeSpecName "kube-api-access-dsmf7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:00:03 crc kubenswrapper[4838]: I1128 11:00:03.210276 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9a63111-f82b-4371-9cdb-c839e961dd7e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b9a63111-f82b-4371-9cdb-c839e961dd7e" (UID: "b9a63111-f82b-4371-9cdb-c839e961dd7e"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:00:03 crc kubenswrapper[4838]: I1128 11:00:03.305074 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dsmf7\" (UniqueName: \"kubernetes.io/projected/b9a63111-f82b-4371-9cdb-c839e961dd7e-kube-api-access-dsmf7\") on node \"crc\" DevicePath \"\"" Nov 28 11:00:03 crc kubenswrapper[4838]: I1128 11:00:03.305105 4838 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9a63111-f82b-4371-9cdb-c839e961dd7e-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 11:00:03 crc kubenswrapper[4838]: I1128 11:00:03.612326 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-mn7zq" event={"ID":"b9a63111-f82b-4371-9cdb-c839e961dd7e","Type":"ContainerDied","Data":"ec3428120c9a572e88b5668f8593db993a79f4b0ece4b67d2b2d6d73ce22ea72"} Nov 28 11:00:03 crc kubenswrapper[4838]: I1128 11:00:03.612361 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec3428120c9a572e88b5668f8593db993a79f4b0ece4b67d2b2d6d73ce22ea72" Nov 28 11:00:03 crc kubenswrapper[4838]: I1128 11:00:03.612385 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405460-mn7zq" Nov 28 11:00:04 crc kubenswrapper[4838]: I1128 11:00:04.116532 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405415-t4j6r"] Nov 28 11:00:04 crc kubenswrapper[4838]: I1128 11:00:04.126840 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405415-t4j6r"] Nov 28 11:00:04 crc kubenswrapper[4838]: I1128 11:00:04.584189 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="996c85bb-be41-4eaa-9e6e-a912a0a3db0d" path="/var/lib/kubelet/pods/996c85bb-be41-4eaa-9e6e-a912a0a3db0d/volumes" Nov 28 11:00:28 crc kubenswrapper[4838]: I1128 11:00:28.115855 4838 scope.go:117] "RemoveContainer" containerID="a3fcc5758e976db53e516a97358221b13c2285d66c5486e9da43a9c380a51e8a" Nov 28 11:00:53 crc kubenswrapper[4838]: I1128 11:00:53.939804 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:00:53 crc kubenswrapper[4838]: I1128 11:00:53.940360 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:01:00 crc kubenswrapper[4838]: I1128 11:01:00.158956 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29405461-fclpk"] Nov 28 11:01:00 crc kubenswrapper[4838]: E1128 11:01:00.160111 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9a63111-f82b-4371-9cdb-c839e961dd7e" containerName="collect-profiles" Nov 28 11:01:00 crc kubenswrapper[4838]: I1128 11:01:00.160132 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9a63111-f82b-4371-9cdb-c839e961dd7e" containerName="collect-profiles" Nov 28 11:01:00 crc kubenswrapper[4838]: I1128 
11:01:00.160365 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9a63111-f82b-4371-9cdb-c839e961dd7e" containerName="collect-profiles" Nov 28 11:01:00 crc kubenswrapper[4838]: I1128 11:01:00.161247 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29405461-fclpk" Nov 28 11:01:00 crc kubenswrapper[4838]: I1128 11:01:00.176889 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29405461-fclpk"] Nov 28 11:01:00 crc kubenswrapper[4838]: I1128 11:01:00.323417 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9119237-dcca-4d01-b6e2-6deddc18f8f6-config-data\") pod \"keystone-cron-29405461-fclpk\" (UID: \"b9119237-dcca-4d01-b6e2-6deddc18f8f6\") " pod="openstack/keystone-cron-29405461-fclpk" Nov 28 11:01:00 crc kubenswrapper[4838]: I1128 11:01:00.323757 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b9119237-dcca-4d01-b6e2-6deddc18f8f6-fernet-keys\") pod \"keystone-cron-29405461-fclpk\" (UID: \"b9119237-dcca-4d01-b6e2-6deddc18f8f6\") " pod="openstack/keystone-cron-29405461-fclpk" Nov 28 11:01:00 crc kubenswrapper[4838]: I1128 11:01:00.323859 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9119237-dcca-4d01-b6e2-6deddc18f8f6-combined-ca-bundle\") pod \"keystone-cron-29405461-fclpk\" (UID: \"b9119237-dcca-4d01-b6e2-6deddc18f8f6\") " pod="openstack/keystone-cron-29405461-fclpk" Nov 28 11:01:00 crc kubenswrapper[4838]: I1128 11:01:00.323939 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvhhl\" (UniqueName: \"kubernetes.io/projected/b9119237-dcca-4d01-b6e2-6deddc18f8f6-kube-api-access-kvhhl\") pod \"keystone-cron-29405461-fclpk\" (UID: \"b9119237-dcca-4d01-b6e2-6deddc18f8f6\") " pod="openstack/keystone-cron-29405461-fclpk" Nov 28 11:01:00 crc kubenswrapper[4838]: I1128 11:01:00.425812 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9119237-dcca-4d01-b6e2-6deddc18f8f6-config-data\") pod \"keystone-cron-29405461-fclpk\" (UID: \"b9119237-dcca-4d01-b6e2-6deddc18f8f6\") " pod="openstack/keystone-cron-29405461-fclpk" Nov 28 11:01:00 crc kubenswrapper[4838]: I1128 11:01:00.425878 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b9119237-dcca-4d01-b6e2-6deddc18f8f6-fernet-keys\") pod \"keystone-cron-29405461-fclpk\" (UID: \"b9119237-dcca-4d01-b6e2-6deddc18f8f6\") " pod="openstack/keystone-cron-29405461-fclpk" Nov 28 11:01:00 crc kubenswrapper[4838]: I1128 11:01:00.425936 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9119237-dcca-4d01-b6e2-6deddc18f8f6-combined-ca-bundle\") pod \"keystone-cron-29405461-fclpk\" (UID: \"b9119237-dcca-4d01-b6e2-6deddc18f8f6\") " pod="openstack/keystone-cron-29405461-fclpk" Nov 28 11:01:00 crc kubenswrapper[4838]: I1128 11:01:00.426002 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvhhl\" (UniqueName: 
\"kubernetes.io/projected/b9119237-dcca-4d01-b6e2-6deddc18f8f6-kube-api-access-kvhhl\") pod \"keystone-cron-29405461-fclpk\" (UID: \"b9119237-dcca-4d01-b6e2-6deddc18f8f6\") " pod="openstack/keystone-cron-29405461-fclpk" Nov 28 11:01:00 crc kubenswrapper[4838]: I1128 11:01:00.432262 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9119237-dcca-4d01-b6e2-6deddc18f8f6-combined-ca-bundle\") pod \"keystone-cron-29405461-fclpk\" (UID: \"b9119237-dcca-4d01-b6e2-6deddc18f8f6\") " pod="openstack/keystone-cron-29405461-fclpk" Nov 28 11:01:00 crc kubenswrapper[4838]: I1128 11:01:00.432756 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9119237-dcca-4d01-b6e2-6deddc18f8f6-config-data\") pod \"keystone-cron-29405461-fclpk\" (UID: \"b9119237-dcca-4d01-b6e2-6deddc18f8f6\") " pod="openstack/keystone-cron-29405461-fclpk" Nov 28 11:01:00 crc kubenswrapper[4838]: I1128 11:01:00.441788 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b9119237-dcca-4d01-b6e2-6deddc18f8f6-fernet-keys\") pod \"keystone-cron-29405461-fclpk\" (UID: \"b9119237-dcca-4d01-b6e2-6deddc18f8f6\") " pod="openstack/keystone-cron-29405461-fclpk" Nov 28 11:01:00 crc kubenswrapper[4838]: I1128 11:01:00.447289 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvhhl\" (UniqueName: \"kubernetes.io/projected/b9119237-dcca-4d01-b6e2-6deddc18f8f6-kube-api-access-kvhhl\") pod \"keystone-cron-29405461-fclpk\" (UID: \"b9119237-dcca-4d01-b6e2-6deddc18f8f6\") " pod="openstack/keystone-cron-29405461-fclpk" Nov 28 11:01:00 crc kubenswrapper[4838]: I1128 11:01:00.478831 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29405461-fclpk" Nov 28 11:01:01 crc kubenswrapper[4838]: I1128 11:01:01.141973 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29405461-fclpk"] Nov 28 11:01:01 crc kubenswrapper[4838]: I1128 11:01:01.161385 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405461-fclpk" event={"ID":"b9119237-dcca-4d01-b6e2-6deddc18f8f6","Type":"ContainerStarted","Data":"3d7b1675a9c758e38b4be9dd8cef84a6da46a6f095837a67e55035d63aaa6cf5"} Nov 28 11:01:02 crc kubenswrapper[4838]: I1128 11:01:02.199015 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405461-fclpk" event={"ID":"b9119237-dcca-4d01-b6e2-6deddc18f8f6","Type":"ContainerStarted","Data":"673bf0d2d8202e6543648c0346950e53332d8013b9e2ebfce4e9869613dd42ea"} Nov 28 11:01:02 crc kubenswrapper[4838]: I1128 11:01:02.222517 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29405461-fclpk" podStartSLOduration=2.222496716 podStartE2EDuration="2.222496716s" podCreationTimestamp="2025-11-28 11:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:01:02.219472563 +0000 UTC m=+3833.918446733" watchObservedRunningTime="2025-11-28 11:01:02.222496716 +0000 UTC m=+3833.921470896" Nov 28 11:01:04 crc kubenswrapper[4838]: I1128 11:01:04.214603 4838 generic.go:334] "Generic (PLEG): container finished" podID="b9119237-dcca-4d01-b6e2-6deddc18f8f6" containerID="673bf0d2d8202e6543648c0346950e53332d8013b9e2ebfce4e9869613dd42ea" exitCode=0 Nov 28 11:01:04 crc kubenswrapper[4838]: I1128 11:01:04.215024 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405461-fclpk" event={"ID":"b9119237-dcca-4d01-b6e2-6deddc18f8f6","Type":"ContainerDied","Data":"673bf0d2d8202e6543648c0346950e53332d8013b9e2ebfce4e9869613dd42ea"} Nov 28 11:01:05 crc kubenswrapper[4838]: I1128 11:01:05.762255 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29405461-fclpk" Nov 28 11:01:05 crc kubenswrapper[4838]: I1128 11:01:05.831068 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9119237-dcca-4d01-b6e2-6deddc18f8f6-combined-ca-bundle\") pod \"b9119237-dcca-4d01-b6e2-6deddc18f8f6\" (UID: \"b9119237-dcca-4d01-b6e2-6deddc18f8f6\") " Nov 28 11:01:05 crc kubenswrapper[4838]: I1128 11:01:05.831201 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b9119237-dcca-4d01-b6e2-6deddc18f8f6-fernet-keys\") pod \"b9119237-dcca-4d01-b6e2-6deddc18f8f6\" (UID: \"b9119237-dcca-4d01-b6e2-6deddc18f8f6\") " Nov 28 11:01:05 crc kubenswrapper[4838]: I1128 11:01:05.831264 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kvhhl\" (UniqueName: \"kubernetes.io/projected/b9119237-dcca-4d01-b6e2-6deddc18f8f6-kube-api-access-kvhhl\") pod \"b9119237-dcca-4d01-b6e2-6deddc18f8f6\" (UID: \"b9119237-dcca-4d01-b6e2-6deddc18f8f6\") " Nov 28 11:01:05 crc kubenswrapper[4838]: I1128 11:01:05.831380 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9119237-dcca-4d01-b6e2-6deddc18f8f6-config-data\") pod \"b9119237-dcca-4d01-b6e2-6deddc18f8f6\" (UID: \"b9119237-dcca-4d01-b6e2-6deddc18f8f6\") " Nov 28 11:01:05 crc kubenswrapper[4838]: I1128 11:01:05.929519 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9119237-dcca-4d01-b6e2-6deddc18f8f6-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "b9119237-dcca-4d01-b6e2-6deddc18f8f6" (UID: "b9119237-dcca-4d01-b6e2-6deddc18f8f6"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:01:05 crc kubenswrapper[4838]: I1128 11:01:05.929524 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9119237-dcca-4d01-b6e2-6deddc18f8f6-kube-api-access-kvhhl" (OuterVolumeSpecName: "kube-api-access-kvhhl") pod "b9119237-dcca-4d01-b6e2-6deddc18f8f6" (UID: "b9119237-dcca-4d01-b6e2-6deddc18f8f6"). InnerVolumeSpecName "kube-api-access-kvhhl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:01:05 crc kubenswrapper[4838]: I1128 11:01:05.933452 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9119237-dcca-4d01-b6e2-6deddc18f8f6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b9119237-dcca-4d01-b6e2-6deddc18f8f6" (UID: "b9119237-dcca-4d01-b6e2-6deddc18f8f6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:01:05 crc kubenswrapper[4838]: I1128 11:01:05.935522 4838 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b9119237-dcca-4d01-b6e2-6deddc18f8f6-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 11:01:05 crc kubenswrapper[4838]: I1128 11:01:05.935556 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kvhhl\" (UniqueName: \"kubernetes.io/projected/b9119237-dcca-4d01-b6e2-6deddc18f8f6-kube-api-access-kvhhl\") on node \"crc\" DevicePath \"\"" Nov 28 11:01:05 crc kubenswrapper[4838]: I1128 11:01:05.935599 4838 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9119237-dcca-4d01-b6e2-6deddc18f8f6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 11:01:05 crc kubenswrapper[4838]: I1128 11:01:05.975956 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9119237-dcca-4d01-b6e2-6deddc18f8f6-config-data" (OuterVolumeSpecName: "config-data") pod "b9119237-dcca-4d01-b6e2-6deddc18f8f6" (UID: "b9119237-dcca-4d01-b6e2-6deddc18f8f6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:01:06 crc kubenswrapper[4838]: I1128 11:01:06.037955 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9119237-dcca-4d01-b6e2-6deddc18f8f6-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 11:01:06 crc kubenswrapper[4838]: I1128 11:01:06.241141 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405461-fclpk" event={"ID":"b9119237-dcca-4d01-b6e2-6deddc18f8f6","Type":"ContainerDied","Data":"3d7b1675a9c758e38b4be9dd8cef84a6da46a6f095837a67e55035d63aaa6cf5"} Nov 28 11:01:06 crc kubenswrapper[4838]: I1128 11:01:06.241195 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3d7b1675a9c758e38b4be9dd8cef84a6da46a6f095837a67e55035d63aaa6cf5" Nov 28 11:01:06 crc kubenswrapper[4838]: I1128 11:01:06.241251 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29405461-fclpk" Nov 28 11:01:23 crc kubenswrapper[4838]: I1128 11:01:23.940259 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:01:23 crc kubenswrapper[4838]: I1128 11:01:23.940707 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:01:53 crc kubenswrapper[4838]: I1128 11:01:53.940247 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:01:53 crc kubenswrapper[4838]: I1128 11:01:53.940908 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:01:53 crc kubenswrapper[4838]: I1128 11:01:53.940963 4838 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 11:01:53 crc kubenswrapper[4838]: I1128 11:01:53.941805 4838 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb"} pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 11:01:53 crc kubenswrapper[4838]: I1128 11:01:53.941912 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" containerID="cri-o://616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb" gracePeriod=600 Nov 28 11:01:54 crc kubenswrapper[4838]: E1128 11:01:54.085359 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:01:54 crc kubenswrapper[4838]: I1128 11:01:54.692986 4838 generic.go:334] "Generic (PLEG): container finished" podID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb" exitCode=0 Nov 28 11:01:54 crc kubenswrapper[4838]: I1128 11:01:54.693090 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" 
event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerDied","Data":"616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb"} Nov 28 11:01:54 crc kubenswrapper[4838]: I1128 11:01:54.693427 4838 scope.go:117] "RemoveContainer" containerID="336a1410d5b7b6661dd9229c5864e9b8a009c3cf2d23e82987c1cf12f8cec0b5" Nov 28 11:01:54 crc kubenswrapper[4838]: I1128 11:01:54.694492 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb" Nov 28 11:01:54 crc kubenswrapper[4838]: E1128 11:01:54.695173 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:02:05 crc kubenswrapper[4838]: I1128 11:02:05.053752 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-ab9d-account-create-update-8jghk"] Nov 28 11:02:05 crc kubenswrapper[4838]: I1128 11:02:05.070252 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-create-nff9n"] Nov 28 11:02:05 crc kubenswrapper[4838]: I1128 11:02:05.083229 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-ab9d-account-create-update-8jghk"] Nov 28 11:02:05 crc kubenswrapper[4838]: I1128 11:02:05.092281 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-create-nff9n"] Nov 28 11:02:06 crc kubenswrapper[4838]: I1128 11:02:06.562466 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb" Nov 28 11:02:06 crc kubenswrapper[4838]: E1128 11:02:06.563197 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:02:06 crc kubenswrapper[4838]: I1128 11:02:06.574640 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0024b279-ca1c-4058-924a-0f044953dc17" path="/var/lib/kubelet/pods/0024b279-ca1c-4058-924a-0f044953dc17/volumes" Nov 28 11:02:06 crc kubenswrapper[4838]: I1128 11:02:06.575246 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f" path="/var/lib/kubelet/pods/5f46bbeb-4cbe-4fe8-8d87-fea23975ae8f/volumes" Nov 28 11:02:18 crc kubenswrapper[4838]: I1128 11:02:18.562555 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb" Nov 28 11:02:18 crc kubenswrapper[4838]: E1128 11:02:18.563390 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:02:28 crc kubenswrapper[4838]: 
Nov 28 11:02:28 crc kubenswrapper[4838]: I1128 11:02:28.217790 4838 scope.go:117] "RemoveContainer" containerID="52f9c287a626abca89cb78e5731081da2e9b221ab051e1170200679c0ef6d313"
Nov 28 11:02:28 crc kubenswrapper[4838]: I1128 11:02:28.880407 4838 scope.go:117] "RemoveContainer" containerID="b1bc8f10c2e92416696446754dcb7629ee3bcc846892660caba543a47ab0b4c1"
Nov 28 11:02:30 crc kubenswrapper[4838]: I1128 11:02:30.562593 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb"
Nov 28 11:02:30 crc kubenswrapper[4838]: E1128 11:02:30.563161 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 11:02:44 crc kubenswrapper[4838]: I1128 11:02:44.562280 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb"
Nov 28 11:02:44 crc kubenswrapper[4838]: E1128 11:02:44.562956 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 11:02:57 crc kubenswrapper[4838]: I1128 11:02:57.046984 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-sync-rnkm7"]
Nov 28 11:02:57 crc kubenswrapper[4838]: I1128 11:02:57.059084 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-sync-rnkm7"]
Nov 28 11:02:58 crc kubenswrapper[4838]: I1128 11:02:58.569222 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb"
Nov 28 11:02:58 crc kubenswrapper[4838]: E1128 11:02:58.570257 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 11:02:58 crc kubenswrapper[4838]: I1128 11:02:58.589589 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05628975-e8c2-42db-b5a0-dc9536eb3e75" path="/var/lib/kubelet/pods/05628975-e8c2-42db-b5a0-dc9536eb3e75/volumes"
Nov 28 11:03:02 crc kubenswrapper[4838]: I1128 11:03:02.932231 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-brn4h"]
Nov 28 11:03:02 crc kubenswrapper[4838]: E1128 11:03:02.933501 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9119237-dcca-4d01-b6e2-6deddc18f8f6" containerName="keystone-cron"
Nov 28 11:03:02 crc kubenswrapper[4838]: I1128 11:03:02.933525 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9119237-dcca-4d01-b6e2-6deddc18f8f6" containerName="keystone-cron"
Nov 28 11:03:02 crc kubenswrapper[4838]: I1128 11:03:02.933901 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9119237-dcca-4d01-b6e2-6deddc18f8f6" containerName="keystone-cron"
Nov 28 11:03:02 crc kubenswrapper[4838]: I1128 11:03:02.936045 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-brn4h"
Nov 28 11:03:02 crc kubenswrapper[4838]: I1128 11:03:02.948959 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-brn4h"]
Nov 28 11:03:03 crc kubenswrapper[4838]: I1128 11:03:03.129101 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe07643a-9de2-42ae-bc02-3cbd58b0d012-catalog-content\") pod \"redhat-operators-brn4h\" (UID: \"fe07643a-9de2-42ae-bc02-3cbd58b0d012\") " pod="openshift-marketplace/redhat-operators-brn4h"
Nov 28 11:03:03 crc kubenswrapper[4838]: I1128 11:03:03.129148 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe07643a-9de2-42ae-bc02-3cbd58b0d012-utilities\") pod \"redhat-operators-brn4h\" (UID: \"fe07643a-9de2-42ae-bc02-3cbd58b0d012\") " pod="openshift-marketplace/redhat-operators-brn4h"
Nov 28 11:03:03 crc kubenswrapper[4838]: I1128 11:03:03.129194 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vtwv7\" (UniqueName: \"kubernetes.io/projected/fe07643a-9de2-42ae-bc02-3cbd58b0d012-kube-api-access-vtwv7\") pod \"redhat-operators-brn4h\" (UID: \"fe07643a-9de2-42ae-bc02-3cbd58b0d012\") " pod="openshift-marketplace/redhat-operators-brn4h"
Nov 28 11:03:03 crc kubenswrapper[4838]: I1128 11:03:03.230757 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe07643a-9de2-42ae-bc02-3cbd58b0d012-catalog-content\") pod \"redhat-operators-brn4h\" (UID: \"fe07643a-9de2-42ae-bc02-3cbd58b0d012\") " pod="openshift-marketplace/redhat-operators-brn4h"
Nov 28 11:03:03 crc kubenswrapper[4838]: I1128 11:03:03.230819 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe07643a-9de2-42ae-bc02-3cbd58b0d012-utilities\") pod \"redhat-operators-brn4h\" (UID: \"fe07643a-9de2-42ae-bc02-3cbd58b0d012\") " pod="openshift-marketplace/redhat-operators-brn4h"
Nov 28 11:03:03 crc kubenswrapper[4838]: I1128 11:03:03.230879 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vtwv7\" (UniqueName: \"kubernetes.io/projected/fe07643a-9de2-42ae-bc02-3cbd58b0d012-kube-api-access-vtwv7\") pod \"redhat-operators-brn4h\" (UID: \"fe07643a-9de2-42ae-bc02-3cbd58b0d012\") " pod="openshift-marketplace/redhat-operators-brn4h"
Nov 28 11:03:03 crc kubenswrapper[4838]: I1128 11:03:03.231472 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe07643a-9de2-42ae-bc02-3cbd58b0d012-catalog-content\") pod \"redhat-operators-brn4h\" (UID: \"fe07643a-9de2-42ae-bc02-3cbd58b0d012\") " pod="openshift-marketplace/redhat-operators-brn4h"
Nov 28 11:03:03 crc kubenswrapper[4838]: I1128 11:03:03.231481 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe07643a-9de2-42ae-bc02-3cbd58b0d012-utilities\") pod \"redhat-operators-brn4h\" (UID: \"fe07643a-9de2-42ae-bc02-3cbd58b0d012\") " pod="openshift-marketplace/redhat-operators-brn4h"
Nov 28 11:03:03 crc kubenswrapper[4838]: I1128 11:03:03.258481 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vtwv7\" (UniqueName: \"kubernetes.io/projected/fe07643a-9de2-42ae-bc02-3cbd58b0d012-kube-api-access-vtwv7\") pod \"redhat-operators-brn4h\" (UID: \"fe07643a-9de2-42ae-bc02-3cbd58b0d012\") " pod="openshift-marketplace/redhat-operators-brn4h"
Nov 28 11:03:03 crc kubenswrapper[4838]: I1128 11:03:03.271961 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-brn4h"
Nov 28 11:03:03 crc kubenswrapper[4838]: I1128 11:03:03.817476 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-brn4h"]
Nov 28 11:03:04 crc kubenswrapper[4838]: I1128 11:03:04.385657 4838 generic.go:334] "Generic (PLEG): container finished" podID="fe07643a-9de2-42ae-bc02-3cbd58b0d012" containerID="f92b9eb4f220d755bcb8f2c457f715d687dea532fe1e327c3c43f27fa9591917" exitCode=0
Nov 28 11:03:04 crc kubenswrapper[4838]: I1128 11:03:04.385773 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-brn4h" event={"ID":"fe07643a-9de2-42ae-bc02-3cbd58b0d012","Type":"ContainerDied","Data":"f92b9eb4f220d755bcb8f2c457f715d687dea532fe1e327c3c43f27fa9591917"}
Nov 28 11:03:04 crc kubenswrapper[4838]: I1128 11:03:04.385972 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-brn4h" event={"ID":"fe07643a-9de2-42ae-bc02-3cbd58b0d012","Type":"ContainerStarted","Data":"b28402086ad2042ae94f17e59b4e17ee2438835abe126d7368ee386e25c34137"}
Nov 28 11:03:04 crc kubenswrapper[4838]: I1128 11:03:04.387637 4838 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 11:03:06 crc kubenswrapper[4838]: I1128 11:03:06.410336 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-brn4h" event={"ID":"fe07643a-9de2-42ae-bc02-3cbd58b0d012","Type":"ContainerStarted","Data":"e15e319af8943e203a74a7ad9ae1a1da04f6a49d2b64a31273c6d704296d7958"}
Nov 28 11:03:08 crc kubenswrapper[4838]: I1128 11:03:08.431843 4838 generic.go:334] "Generic (PLEG): container finished" podID="fe07643a-9de2-42ae-bc02-3cbd58b0d012" containerID="e15e319af8943e203a74a7ad9ae1a1da04f6a49d2b64a31273c6d704296d7958" exitCode=0
Nov 28 11:03:08 crc kubenswrapper[4838]: I1128 11:03:08.431941 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-brn4h" event={"ID":"fe07643a-9de2-42ae-bc02-3cbd58b0d012","Type":"ContainerDied","Data":"e15e319af8943e203a74a7ad9ae1a1da04f6a49d2b64a31273c6d704296d7958"}
Nov 28 11:03:10 crc kubenswrapper[4838]: I1128 11:03:10.460477 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-brn4h" event={"ID":"fe07643a-9de2-42ae-bc02-3cbd58b0d012","Type":"ContainerStarted","Data":"df5b41706a35e0a7379ca85a62ed51cfd6b496bf55137fbada46989392c89882"}
Nov 28 11:03:10 crc kubenswrapper[4838]: I1128 11:03:10.563424 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb"
Nov 28 11:03:10 crc kubenswrapper[4838]: E1128 11:03:10.563776 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 11:03:13 crc kubenswrapper[4838]: I1128 11:03:13.272787 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-brn4h"
Nov 28 11:03:13 crc kubenswrapper[4838]: I1128 11:03:13.273243 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-brn4h"
Nov 28 11:03:14 crc kubenswrapper[4838]: I1128 11:03:14.347156 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-brn4h" podUID="fe07643a-9de2-42ae-bc02-3cbd58b0d012" containerName="registry-server" probeResult="failure" output=<
Nov 28 11:03:14 crc kubenswrapper[4838]: timeout: failed to connect service ":50051" within 1s
Nov 28 11:03:14 crc kubenswrapper[4838]: >
Nov 28 11:03:23 crc kubenswrapper[4838]: I1128 11:03:23.335321 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-brn4h"
Nov 28 11:03:23 crc kubenswrapper[4838]: I1128 11:03:23.362029 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-brn4h" podStartSLOduration=16.372366045 podStartE2EDuration="21.362005133s" podCreationTimestamp="2025-11-28 11:03:02 +0000 UTC" firstStartedPulling="2025-11-28 11:03:04.387438371 +0000 UTC m=+3956.086412541" lastFinishedPulling="2025-11-28 11:03:09.377077459 +0000 UTC m=+3961.076051629" observedRunningTime="2025-11-28 11:03:10.495594029 +0000 UTC m=+3962.194568199" watchObservedRunningTime="2025-11-28 11:03:23.362005133 +0000 UTC m=+3975.060979323"
Nov 28 11:03:23 crc kubenswrapper[4838]: I1128 11:03:23.408396 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-brn4h"
Nov 28 11:03:23 crc kubenswrapper[4838]: I1128 11:03:23.562039 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb"
Nov 28 11:03:23 crc kubenswrapper[4838]: E1128 11:03:23.562306 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 11:03:23 crc kubenswrapper[4838]: I1128 11:03:23.577891 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-brn4h"]
Nov 28 11:03:24 crc kubenswrapper[4838]: I1128 11:03:24.598949 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-brn4h" podUID="fe07643a-9de2-42ae-bc02-3cbd58b0d012" containerName="registry-server" containerID="cri-o://df5b41706a35e0a7379ca85a62ed51cfd6b496bf55137fbada46989392c89882" gracePeriod=2
Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.190745 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-brn4h"
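
The pod_startup_latency_tracker entry above is internally consistent: podStartSLOduration is the end-to-end duration (watchObservedRunningTime minus podCreationTimestamp) less the image-pull window (lastFinishedPulling minus firstStartedPulling). A quick check of the arithmetic, as a sketch (Python; timestamps copied from the entry above, truncated from nanoseconds to microseconds for strptime):

from datetime import datetime

FMT = "%Y-%m-%d %H:%M:%S.%f"
created  = datetime.strptime("2025-11-28 11:03:02.000000", FMT)  # podCreationTimestamp
observed = datetime.strptime("2025-11-28 11:03:23.362005", FMT)  # watchObservedRunningTime
pull_a   = datetime.strptime("2025-11-28 11:03:04.387438", FMT)  # firstStartedPulling
pull_b   = datetime.strptime("2025-11-28 11:03:09.377077", FMT)  # lastFinishedPulling

e2e = (observed - created).total_seconds()
slo = e2e - (pull_b - pull_a).total_seconds()
print(f"E2E={e2e:.6f}s SLO={slo:.6f}s")
# E2E=21.362005s SLO=16.372366s -- matching podStartE2EDuration and podStartSLOduration
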
Need to start a new one" pod="openshift-marketplace/redhat-operators-brn4h" Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.307846 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe07643a-9de2-42ae-bc02-3cbd58b0d012-utilities\") pod \"fe07643a-9de2-42ae-bc02-3cbd58b0d012\" (UID: \"fe07643a-9de2-42ae-bc02-3cbd58b0d012\") " Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.308010 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe07643a-9de2-42ae-bc02-3cbd58b0d012-catalog-content\") pod \"fe07643a-9de2-42ae-bc02-3cbd58b0d012\" (UID: \"fe07643a-9de2-42ae-bc02-3cbd58b0d012\") " Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.308118 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vtwv7\" (UniqueName: \"kubernetes.io/projected/fe07643a-9de2-42ae-bc02-3cbd58b0d012-kube-api-access-vtwv7\") pod \"fe07643a-9de2-42ae-bc02-3cbd58b0d012\" (UID: \"fe07643a-9de2-42ae-bc02-3cbd58b0d012\") " Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.309018 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe07643a-9de2-42ae-bc02-3cbd58b0d012-utilities" (OuterVolumeSpecName: "utilities") pod "fe07643a-9de2-42ae-bc02-3cbd58b0d012" (UID: "fe07643a-9de2-42ae-bc02-3cbd58b0d012"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.314549 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe07643a-9de2-42ae-bc02-3cbd58b0d012-kube-api-access-vtwv7" (OuterVolumeSpecName: "kube-api-access-vtwv7") pod "fe07643a-9de2-42ae-bc02-3cbd58b0d012" (UID: "fe07643a-9de2-42ae-bc02-3cbd58b0d012"). InnerVolumeSpecName "kube-api-access-vtwv7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.408348 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe07643a-9de2-42ae-bc02-3cbd58b0d012-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fe07643a-9de2-42ae-bc02-3cbd58b0d012" (UID: "fe07643a-9de2-42ae-bc02-3cbd58b0d012"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.410475 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe07643a-9de2-42ae-bc02-3cbd58b0d012-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.410634 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vtwv7\" (UniqueName: \"kubernetes.io/projected/fe07643a-9de2-42ae-bc02-3cbd58b0d012-kube-api-access-vtwv7\") on node \"crc\" DevicePath \"\"" Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.410741 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe07643a-9de2-42ae-bc02-3cbd58b0d012-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.609192 4838 generic.go:334] "Generic (PLEG): container finished" podID="fe07643a-9de2-42ae-bc02-3cbd58b0d012" containerID="df5b41706a35e0a7379ca85a62ed51cfd6b496bf55137fbada46989392c89882" exitCode=0 Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.609233 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-brn4h" event={"ID":"fe07643a-9de2-42ae-bc02-3cbd58b0d012","Type":"ContainerDied","Data":"df5b41706a35e0a7379ca85a62ed51cfd6b496bf55137fbada46989392c89882"} Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.609252 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-brn4h" Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.609267 4838 scope.go:117] "RemoveContainer" containerID="df5b41706a35e0a7379ca85a62ed51cfd6b496bf55137fbada46989392c89882" Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.609257 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-brn4h" event={"ID":"fe07643a-9de2-42ae-bc02-3cbd58b0d012","Type":"ContainerDied","Data":"b28402086ad2042ae94f17e59b4e17ee2438835abe126d7368ee386e25c34137"} Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.636661 4838 scope.go:117] "RemoveContainer" containerID="e15e319af8943e203a74a7ad9ae1a1da04f6a49d2b64a31273c6d704296d7958" Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.651908 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-brn4h"] Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.660637 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-brn4h"] Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.670800 4838 scope.go:117] "RemoveContainer" containerID="f92b9eb4f220d755bcb8f2c457f715d687dea532fe1e327c3c43f27fa9591917" Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.700093 4838 scope.go:117] "RemoveContainer" containerID="df5b41706a35e0a7379ca85a62ed51cfd6b496bf55137fbada46989392c89882" Nov 28 11:03:25 crc kubenswrapper[4838]: E1128 11:03:25.700822 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df5b41706a35e0a7379ca85a62ed51cfd6b496bf55137fbada46989392c89882\": container with ID starting with df5b41706a35e0a7379ca85a62ed51cfd6b496bf55137fbada46989392c89882 not found: ID does not exist" containerID="df5b41706a35e0a7379ca85a62ed51cfd6b496bf55137fbada46989392c89882" Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.700870 4838 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df5b41706a35e0a7379ca85a62ed51cfd6b496bf55137fbada46989392c89882"} err="failed to get container status \"df5b41706a35e0a7379ca85a62ed51cfd6b496bf55137fbada46989392c89882\": rpc error: code = NotFound desc = could not find container \"df5b41706a35e0a7379ca85a62ed51cfd6b496bf55137fbada46989392c89882\": container with ID starting with df5b41706a35e0a7379ca85a62ed51cfd6b496bf55137fbada46989392c89882 not found: ID does not exist" Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.700903 4838 scope.go:117] "RemoveContainer" containerID="e15e319af8943e203a74a7ad9ae1a1da04f6a49d2b64a31273c6d704296d7958" Nov 28 11:03:25 crc kubenswrapper[4838]: E1128 11:03:25.701907 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e15e319af8943e203a74a7ad9ae1a1da04f6a49d2b64a31273c6d704296d7958\": container with ID starting with e15e319af8943e203a74a7ad9ae1a1da04f6a49d2b64a31273c6d704296d7958 not found: ID does not exist" containerID="e15e319af8943e203a74a7ad9ae1a1da04f6a49d2b64a31273c6d704296d7958" Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.701969 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e15e319af8943e203a74a7ad9ae1a1da04f6a49d2b64a31273c6d704296d7958"} err="failed to get container status \"e15e319af8943e203a74a7ad9ae1a1da04f6a49d2b64a31273c6d704296d7958\": rpc error: code = NotFound desc = could not find container \"e15e319af8943e203a74a7ad9ae1a1da04f6a49d2b64a31273c6d704296d7958\": container with ID starting with e15e319af8943e203a74a7ad9ae1a1da04f6a49d2b64a31273c6d704296d7958 not found: ID does not exist" Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.702008 4838 scope.go:117] "RemoveContainer" containerID="f92b9eb4f220d755bcb8f2c457f715d687dea532fe1e327c3c43f27fa9591917" Nov 28 11:03:25 crc kubenswrapper[4838]: E1128 11:03:25.702307 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f92b9eb4f220d755bcb8f2c457f715d687dea532fe1e327c3c43f27fa9591917\": container with ID starting with f92b9eb4f220d755bcb8f2c457f715d687dea532fe1e327c3c43f27fa9591917 not found: ID does not exist" containerID="f92b9eb4f220d755bcb8f2c457f715d687dea532fe1e327c3c43f27fa9591917" Nov 28 11:03:25 crc kubenswrapper[4838]: I1128 11:03:25.702336 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f92b9eb4f220d755bcb8f2c457f715d687dea532fe1e327c3c43f27fa9591917"} err="failed to get container status \"f92b9eb4f220d755bcb8f2c457f715d687dea532fe1e327c3c43f27fa9591917\": rpc error: code = NotFound desc = could not find container \"f92b9eb4f220d755bcb8f2c457f715d687dea532fe1e327c3c43f27fa9591917\": container with ID starting with f92b9eb4f220d755bcb8f2c457f715d687dea532fe1e327c3c43f27fa9591917 not found: ID does not exist" Nov 28 11:03:26 crc kubenswrapper[4838]: I1128 11:03:26.579014 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe07643a-9de2-42ae-bc02-3cbd58b0d012" path="/var/lib/kubelet/pods/fe07643a-9de2-42ae-bc02-3cbd58b0d012/volumes" Nov 28 11:03:28 crc kubenswrapper[4838]: I1128 11:03:28.955197 4838 scope.go:117] "RemoveContainer" containerID="ba9491d4066fe9a83cc6e20b2fe20c40497d9126f96986013abddd70ab38a5d9" Nov 28 11:03:38 crc kubenswrapper[4838]: I1128 11:03:38.569859 4838 scope.go:117] "RemoveContainer" 
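
The NotFound errors above are benign: RemoveContainer raced with garbage collection in the runtime, and a container that is already gone needs no further deletion. The same idempotent pattern, as a sketch (Python; `runtime.remove` and `NotFoundError` are hypothetical stand-ins, not a real CRI client):

class NotFoundError(Exception):
    """Stand-in for a gRPC NotFound status from the container runtime."""

def remove_container(runtime, container_id: str) -> None:
    """Delete a container, treating 'already gone' as success (idempotent cleanup)."""
    try:
        runtime.remove(container_id)  # hypothetical CRI call
    except NotFoundError:
        # Another cleanup path won the race; nothing left to do.
        pass
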
containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb" Nov 28 11:03:38 crc kubenswrapper[4838]: E1128 11:03:38.570828 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:03:51 crc kubenswrapper[4838]: I1128 11:03:51.562344 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb" Nov 28 11:03:51 crc kubenswrapper[4838]: E1128 11:03:51.563184 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:04:06 crc kubenswrapper[4838]: I1128 11:04:06.562939 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb" Nov 28 11:04:06 crc kubenswrapper[4838]: E1128 11:04:06.564079 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:04:20 crc kubenswrapper[4838]: I1128 11:04:20.563455 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb" Nov 28 11:04:20 crc kubenswrapper[4838]: E1128 11:04:20.566269 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:04:33 crc kubenswrapper[4838]: I1128 11:04:33.562543 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb" Nov 28 11:04:33 crc kubenswrapper[4838]: E1128 11:04:33.563564 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:04:44 crc kubenswrapper[4838]: I1128 11:04:44.562827 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb" Nov 28 11:04:44 crc kubenswrapper[4838]: E1128 11:04:44.563778 4838 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:04:58 crc kubenswrapper[4838]: I1128 11:04:58.574346 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb" Nov 28 11:04:58 crc kubenswrapper[4838]: E1128 11:04:58.575937 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:05:09 crc kubenswrapper[4838]: I1128 11:05:09.563967 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb" Nov 28 11:05:09 crc kubenswrapper[4838]: E1128 11:05:09.567274 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:05:23 crc kubenswrapper[4838]: I1128 11:05:23.562414 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb" Nov 28 11:05:23 crc kubenswrapper[4838]: E1128 11:05:23.563447 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:05:38 crc kubenswrapper[4838]: I1128 11:05:38.576387 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb" Nov 28 11:05:38 crc kubenswrapper[4838]: E1128 11:05:38.577534 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:05:44 crc kubenswrapper[4838]: I1128 11:05:44.681333 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fx8c9"] Nov 28 11:05:44 crc kubenswrapper[4838]: E1128 11:05:44.682971 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe07643a-9de2-42ae-bc02-3cbd58b0d012" containerName="registry-server" Nov 28 11:05:44 crc kubenswrapper[4838]: I1128 11:05:44.683009 4838 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="fe07643a-9de2-42ae-bc02-3cbd58b0d012" containerName="registry-server" Nov 28 11:05:44 crc kubenswrapper[4838]: E1128 11:05:44.683076 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe07643a-9de2-42ae-bc02-3cbd58b0d012" containerName="extract-content" Nov 28 11:05:44 crc kubenswrapper[4838]: I1128 11:05:44.683094 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe07643a-9de2-42ae-bc02-3cbd58b0d012" containerName="extract-content" Nov 28 11:05:44 crc kubenswrapper[4838]: E1128 11:05:44.683133 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe07643a-9de2-42ae-bc02-3cbd58b0d012" containerName="extract-utilities" Nov 28 11:05:44 crc kubenswrapper[4838]: I1128 11:05:44.683152 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe07643a-9de2-42ae-bc02-3cbd58b0d012" containerName="extract-utilities" Nov 28 11:05:44 crc kubenswrapper[4838]: I1128 11:05:44.683668 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe07643a-9de2-42ae-bc02-3cbd58b0d012" containerName="registry-server" Nov 28 11:05:44 crc kubenswrapper[4838]: I1128 11:05:44.687139 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fx8c9" Nov 28 11:05:44 crc kubenswrapper[4838]: I1128 11:05:44.692117 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fx8c9"] Nov 28 11:05:44 crc kubenswrapper[4838]: I1128 11:05:44.711612 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/966245dc-0786-41a8-bfd7-0e72e5d1eb2a-catalog-content\") pod \"redhat-marketplace-fx8c9\" (UID: \"966245dc-0786-41a8-bfd7-0e72e5d1eb2a\") " pod="openshift-marketplace/redhat-marketplace-fx8c9" Nov 28 11:05:44 crc kubenswrapper[4838]: I1128 11:05:44.711812 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/966245dc-0786-41a8-bfd7-0e72e5d1eb2a-utilities\") pod \"redhat-marketplace-fx8c9\" (UID: \"966245dc-0786-41a8-bfd7-0e72e5d1eb2a\") " pod="openshift-marketplace/redhat-marketplace-fx8c9" Nov 28 11:05:44 crc kubenswrapper[4838]: I1128 11:05:44.711912 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgsjb\" (UniqueName: \"kubernetes.io/projected/966245dc-0786-41a8-bfd7-0e72e5d1eb2a-kube-api-access-rgsjb\") pod \"redhat-marketplace-fx8c9\" (UID: \"966245dc-0786-41a8-bfd7-0e72e5d1eb2a\") " pod="openshift-marketplace/redhat-marketplace-fx8c9" Nov 28 11:05:44 crc kubenswrapper[4838]: I1128 11:05:44.814189 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgsjb\" (UniqueName: \"kubernetes.io/projected/966245dc-0786-41a8-bfd7-0e72e5d1eb2a-kube-api-access-rgsjb\") pod \"redhat-marketplace-fx8c9\" (UID: \"966245dc-0786-41a8-bfd7-0e72e5d1eb2a\") " pod="openshift-marketplace/redhat-marketplace-fx8c9" Nov 28 11:05:44 crc kubenswrapper[4838]: I1128 11:05:44.814427 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/966245dc-0786-41a8-bfd7-0e72e5d1eb2a-catalog-content\") pod \"redhat-marketplace-fx8c9\" (UID: \"966245dc-0786-41a8-bfd7-0e72e5d1eb2a\") " pod="openshift-marketplace/redhat-marketplace-fx8c9" Nov 28 11:05:44 crc kubenswrapper[4838]: I1128 
11:05:44.814469 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/966245dc-0786-41a8-bfd7-0e72e5d1eb2a-utilities\") pod \"redhat-marketplace-fx8c9\" (UID: \"966245dc-0786-41a8-bfd7-0e72e5d1eb2a\") " pod="openshift-marketplace/redhat-marketplace-fx8c9" Nov 28 11:05:44 crc kubenswrapper[4838]: I1128 11:05:44.815070 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/966245dc-0786-41a8-bfd7-0e72e5d1eb2a-utilities\") pod \"redhat-marketplace-fx8c9\" (UID: \"966245dc-0786-41a8-bfd7-0e72e5d1eb2a\") " pod="openshift-marketplace/redhat-marketplace-fx8c9" Nov 28 11:05:44 crc kubenswrapper[4838]: I1128 11:05:44.815783 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/966245dc-0786-41a8-bfd7-0e72e5d1eb2a-catalog-content\") pod \"redhat-marketplace-fx8c9\" (UID: \"966245dc-0786-41a8-bfd7-0e72e5d1eb2a\") " pod="openshift-marketplace/redhat-marketplace-fx8c9" Nov 28 11:05:44 crc kubenswrapper[4838]: I1128 11:05:44.835476 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgsjb\" (UniqueName: \"kubernetes.io/projected/966245dc-0786-41a8-bfd7-0e72e5d1eb2a-kube-api-access-rgsjb\") pod \"redhat-marketplace-fx8c9\" (UID: \"966245dc-0786-41a8-bfd7-0e72e5d1eb2a\") " pod="openshift-marketplace/redhat-marketplace-fx8c9" Nov 28 11:05:45 crc kubenswrapper[4838]: I1128 11:05:45.039984 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fx8c9" Nov 28 11:05:45 crc kubenswrapper[4838]: I1128 11:05:45.521550 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fx8c9"] Nov 28 11:05:46 crc kubenswrapper[4838]: I1128 11:05:46.059635 4838 generic.go:334] "Generic (PLEG): container finished" podID="966245dc-0786-41a8-bfd7-0e72e5d1eb2a" containerID="a7c9ec0af8ee20833e1737c03672b648a5baa13be1bf9de05e6b7daabcd47a08" exitCode=0 Nov 28 11:05:46 crc kubenswrapper[4838]: I1128 11:05:46.059709 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fx8c9" event={"ID":"966245dc-0786-41a8-bfd7-0e72e5d1eb2a","Type":"ContainerDied","Data":"a7c9ec0af8ee20833e1737c03672b648a5baa13be1bf9de05e6b7daabcd47a08"} Nov 28 11:05:46 crc kubenswrapper[4838]: I1128 11:05:46.060104 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fx8c9" event={"ID":"966245dc-0786-41a8-bfd7-0e72e5d1eb2a","Type":"ContainerStarted","Data":"b4c242ac55189a4822acfd71231fbd16f9aee950b6a985be17d8731aef359826"} Nov 28 11:05:48 crc kubenswrapper[4838]: I1128 11:05:48.081165 4838 generic.go:334] "Generic (PLEG): container finished" podID="966245dc-0786-41a8-bfd7-0e72e5d1eb2a" containerID="7f64f190c2da749d8359c8e115edf694bdf26188dddcca43d88186c608cb8e5c" exitCode=0 Nov 28 11:05:48 crc kubenswrapper[4838]: I1128 11:05:48.081218 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fx8c9" event={"ID":"966245dc-0786-41a8-bfd7-0e72e5d1eb2a","Type":"ContainerDied","Data":"7f64f190c2da749d8359c8e115edf694bdf26188dddcca43d88186c608cb8e5c"} Nov 28 11:05:49 crc kubenswrapper[4838]: I1128 11:05:49.092270 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fx8c9" 
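
The VerifyControllerAttachedVolume, then MountVolume started, then MountVolume.SetUp succeeded sequence above is the volume manager reconciling desired state against actual state before the sandbox starts. A stripped-down sketch of that loop (Python; the two-set model is a deliberate simplification of the real reconciler):

def reconcile(desired: set[str], mounted: set[str]) -> set[str]:
    """One reconciler pass: mount what the pod needs, unmount what it no longer does."""
    for vol in desired - mounted:
        print(f"MountVolume.SetUp for {vol}")
        mounted.add(vol)
    for vol in mounted - desired:
        print(f"UnmountVolume.TearDown for {vol}")
        mounted.discard(vol)
    return mounted

state = reconcile({"catalog-content", "utilities", "kube-api-access-rgsjb"}, set())
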
event={"ID":"966245dc-0786-41a8-bfd7-0e72e5d1eb2a","Type":"ContainerStarted","Data":"854366e896edd07c06c7f6b5522631fd0cb28f2cea3aa294885e3fb0c2cfec92"} Nov 28 11:05:49 crc kubenswrapper[4838]: I1128 11:05:49.110943 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fx8c9" podStartSLOduration=2.704183797 podStartE2EDuration="5.110924581s" podCreationTimestamp="2025-11-28 11:05:44 +0000 UTC" firstStartedPulling="2025-11-28 11:05:46.062678888 +0000 UTC m=+4117.761653078" lastFinishedPulling="2025-11-28 11:05:48.469419672 +0000 UTC m=+4120.168393862" observedRunningTime="2025-11-28 11:05:49.109735669 +0000 UTC m=+4120.808709839" watchObservedRunningTime="2025-11-28 11:05:49.110924581 +0000 UTC m=+4120.809898771" Nov 28 11:05:53 crc kubenswrapper[4838]: I1128 11:05:53.562124 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb" Nov 28 11:05:53 crc kubenswrapper[4838]: E1128 11:05:53.562869 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:05:55 crc kubenswrapper[4838]: I1128 11:05:55.040166 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fx8c9" Nov 28 11:05:55 crc kubenswrapper[4838]: I1128 11:05:55.040520 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fx8c9" Nov 28 11:05:55 crc kubenswrapper[4838]: I1128 11:05:55.092576 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fx8c9" Nov 28 11:05:55 crc kubenswrapper[4838]: I1128 11:05:55.195053 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fx8c9" Nov 28 11:05:55 crc kubenswrapper[4838]: I1128 11:05:55.333835 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fx8c9"] Nov 28 11:05:57 crc kubenswrapper[4838]: I1128 11:05:57.170581 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fx8c9" podUID="966245dc-0786-41a8-bfd7-0e72e5d1eb2a" containerName="registry-server" containerID="cri-o://854366e896edd07c06c7f6b5522631fd0cb28f2cea3aa294885e3fb0c2cfec92" gracePeriod=2 Nov 28 11:05:57 crc kubenswrapper[4838]: I1128 11:05:57.725841 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fx8c9" Nov 28 11:05:57 crc kubenswrapper[4838]: I1128 11:05:57.802920 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/966245dc-0786-41a8-bfd7-0e72e5d1eb2a-utilities\") pod \"966245dc-0786-41a8-bfd7-0e72e5d1eb2a\" (UID: \"966245dc-0786-41a8-bfd7-0e72e5d1eb2a\") " Nov 28 11:05:57 crc kubenswrapper[4838]: I1128 11:05:57.803094 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rgsjb\" (UniqueName: \"kubernetes.io/projected/966245dc-0786-41a8-bfd7-0e72e5d1eb2a-kube-api-access-rgsjb\") pod \"966245dc-0786-41a8-bfd7-0e72e5d1eb2a\" (UID: \"966245dc-0786-41a8-bfd7-0e72e5d1eb2a\") " Nov 28 11:05:57 crc kubenswrapper[4838]: I1128 11:05:57.803208 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/966245dc-0786-41a8-bfd7-0e72e5d1eb2a-catalog-content\") pod \"966245dc-0786-41a8-bfd7-0e72e5d1eb2a\" (UID: \"966245dc-0786-41a8-bfd7-0e72e5d1eb2a\") " Nov 28 11:05:57 crc kubenswrapper[4838]: I1128 11:05:57.804522 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/966245dc-0786-41a8-bfd7-0e72e5d1eb2a-utilities" (OuterVolumeSpecName: "utilities") pod "966245dc-0786-41a8-bfd7-0e72e5d1eb2a" (UID: "966245dc-0786-41a8-bfd7-0e72e5d1eb2a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:05:57 crc kubenswrapper[4838]: I1128 11:05:57.810089 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/966245dc-0786-41a8-bfd7-0e72e5d1eb2a-kube-api-access-rgsjb" (OuterVolumeSpecName: "kube-api-access-rgsjb") pod "966245dc-0786-41a8-bfd7-0e72e5d1eb2a" (UID: "966245dc-0786-41a8-bfd7-0e72e5d1eb2a"). InnerVolumeSpecName "kube-api-access-rgsjb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:05:57 crc kubenswrapper[4838]: I1128 11:05:57.821003 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/966245dc-0786-41a8-bfd7-0e72e5d1eb2a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "966245dc-0786-41a8-bfd7-0e72e5d1eb2a" (UID: "966245dc-0786-41a8-bfd7-0e72e5d1eb2a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 11:05:57 crc kubenswrapper[4838]: I1128 11:05:57.905396 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rgsjb\" (UniqueName: \"kubernetes.io/projected/966245dc-0786-41a8-bfd7-0e72e5d1eb2a-kube-api-access-rgsjb\") on node \"crc\" DevicePath \"\""
Nov 28 11:05:57 crc kubenswrapper[4838]: I1128 11:05:57.905433 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/966245dc-0786-41a8-bfd7-0e72e5d1eb2a-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 11:05:57 crc kubenswrapper[4838]: I1128 11:05:57.905444 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/966245dc-0786-41a8-bfd7-0e72e5d1eb2a-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 11:05:58 crc kubenswrapper[4838]: I1128 11:05:58.187466 4838 generic.go:334] "Generic (PLEG): container finished" podID="966245dc-0786-41a8-bfd7-0e72e5d1eb2a" containerID="854366e896edd07c06c7f6b5522631fd0cb28f2cea3aa294885e3fb0c2cfec92" exitCode=0
Nov 28 11:05:58 crc kubenswrapper[4838]: I1128 11:05:58.187546 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fx8c9" event={"ID":"966245dc-0786-41a8-bfd7-0e72e5d1eb2a","Type":"ContainerDied","Data":"854366e896edd07c06c7f6b5522631fd0cb28f2cea3aa294885e3fb0c2cfec92"}
Nov 28 11:05:58 crc kubenswrapper[4838]: I1128 11:05:58.188023 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fx8c9" event={"ID":"966245dc-0786-41a8-bfd7-0e72e5d1eb2a","Type":"ContainerDied","Data":"b4c242ac55189a4822acfd71231fbd16f9aee950b6a985be17d8731aef359826"}
Nov 28 11:05:58 crc kubenswrapper[4838]: I1128 11:05:58.188075 4838 scope.go:117] "RemoveContainer" containerID="854366e896edd07c06c7f6b5522631fd0cb28f2cea3aa294885e3fb0c2cfec92"
Nov 28 11:05:58 crc kubenswrapper[4838]: I1128 11:05:58.187583 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fx8c9"
Nov 28 11:05:58 crc kubenswrapper[4838]: I1128 11:05:58.227440 4838 scope.go:117] "RemoveContainer" containerID="7f64f190c2da749d8359c8e115edf694bdf26188dddcca43d88186c608cb8e5c"
Nov 28 11:05:58 crc kubenswrapper[4838]: I1128 11:05:58.249648 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fx8c9"]
Nov 28 11:05:58 crc kubenswrapper[4838]: I1128 11:05:58.263582 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fx8c9"]
Nov 28 11:05:58 crc kubenswrapper[4838]: I1128 11:05:58.265032 4838 scope.go:117] "RemoveContainer" containerID="a7c9ec0af8ee20833e1737c03672b648a5baa13be1bf9de05e6b7daabcd47a08"
Nov 28 11:05:58 crc kubenswrapper[4838]: I1128 11:05:58.312703 4838 scope.go:117] "RemoveContainer" containerID="854366e896edd07c06c7f6b5522631fd0cb28f2cea3aa294885e3fb0c2cfec92"
Nov 28 11:05:58 crc kubenswrapper[4838]: E1128 11:05:58.314399 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"854366e896edd07c06c7f6b5522631fd0cb28f2cea3aa294885e3fb0c2cfec92\": container with ID starting with 854366e896edd07c06c7f6b5522631fd0cb28f2cea3aa294885e3fb0c2cfec92 not found: ID does not exist" containerID="854366e896edd07c06c7f6b5522631fd0cb28f2cea3aa294885e3fb0c2cfec92"
Nov 28 11:05:58 crc kubenswrapper[4838]: I1128 11:05:58.314458 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"854366e896edd07c06c7f6b5522631fd0cb28f2cea3aa294885e3fb0c2cfec92"} err="failed to get container status \"854366e896edd07c06c7f6b5522631fd0cb28f2cea3aa294885e3fb0c2cfec92\": rpc error: code = NotFound desc = could not find container \"854366e896edd07c06c7f6b5522631fd0cb28f2cea3aa294885e3fb0c2cfec92\": container with ID starting with 854366e896edd07c06c7f6b5522631fd0cb28f2cea3aa294885e3fb0c2cfec92 not found: ID does not exist"
Nov 28 11:05:58 crc kubenswrapper[4838]: I1128 11:05:58.314492 4838 scope.go:117] "RemoveContainer" containerID="7f64f190c2da749d8359c8e115edf694bdf26188dddcca43d88186c608cb8e5c"
Nov 28 11:05:58 crc kubenswrapper[4838]: E1128 11:05:58.314982 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f64f190c2da749d8359c8e115edf694bdf26188dddcca43d88186c608cb8e5c\": container with ID starting with 7f64f190c2da749d8359c8e115edf694bdf26188dddcca43d88186c608cb8e5c not found: ID does not exist" containerID="7f64f190c2da749d8359c8e115edf694bdf26188dddcca43d88186c608cb8e5c"
Nov 28 11:05:58 crc kubenswrapper[4838]: I1128 11:05:58.315016 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f64f190c2da749d8359c8e115edf694bdf26188dddcca43d88186c608cb8e5c"} err="failed to get container status \"7f64f190c2da749d8359c8e115edf694bdf26188dddcca43d88186c608cb8e5c\": rpc error: code = NotFound desc = could not find container \"7f64f190c2da749d8359c8e115edf694bdf26188dddcca43d88186c608cb8e5c\": container with ID starting with 7f64f190c2da749d8359c8e115edf694bdf26188dddcca43d88186c608cb8e5c not found: ID does not exist"
Nov 28 11:05:58 crc kubenswrapper[4838]: I1128 11:05:58.315037 4838 scope.go:117] "RemoveContainer" containerID="a7c9ec0af8ee20833e1737c03672b648a5baa13be1bf9de05e6b7daabcd47a08"
Nov 28 11:05:58 crc kubenswrapper[4838]: E1128 11:05:58.315404 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7c9ec0af8ee20833e1737c03672b648a5baa13be1bf9de05e6b7daabcd47a08\": container with ID starting with a7c9ec0af8ee20833e1737c03672b648a5baa13be1bf9de05e6b7daabcd47a08 not found: ID does not exist" containerID="a7c9ec0af8ee20833e1737c03672b648a5baa13be1bf9de05e6b7daabcd47a08"
Nov 28 11:05:58 crc kubenswrapper[4838]: I1128 11:05:58.315468 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7c9ec0af8ee20833e1737c03672b648a5baa13be1bf9de05e6b7daabcd47a08"} err="failed to get container status \"a7c9ec0af8ee20833e1737c03672b648a5baa13be1bf9de05e6b7daabcd47a08\": rpc error: code = NotFound desc = could not find container \"a7c9ec0af8ee20833e1737c03672b648a5baa13be1bf9de05e6b7daabcd47a08\": container with ID starting with a7c9ec0af8ee20833e1737c03672b648a5baa13be1bf9de05e6b7daabcd47a08 not found: ID does not exist"
Nov 28 11:05:58 crc kubenswrapper[4838]: I1128 11:05:58.581248 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="966245dc-0786-41a8-bfd7-0e72e5d1eb2a" path="/var/lib/kubelet/pods/966245dc-0786-41a8-bfd7-0e72e5d1eb2a/volumes"
Nov 28 11:06:07 crc kubenswrapper[4838]: I1128 11:06:07.562590 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb"
Nov 28 11:06:07 crc kubenswrapper[4838]: E1128 11:06:07.563308 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 11:06:08 crc kubenswrapper[4838]: I1128 11:06:08.293514 4838 generic.go:334] "Generic (PLEG): container finished" podID="7899bfa9-2025-457b-9c46-194188b7f52e" containerID="905d3dc0edf57d99d810fe900ffea0cfdb08431f7351b793cd5f48f1f515c4ca" exitCode=0
Nov 28 11:06:08 crc kubenswrapper[4838]: I1128 11:06:08.293585 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"7899bfa9-2025-457b-9c46-194188b7f52e","Type":"ContainerDied","Data":"905d3dc0edf57d99d810fe900ffea0cfdb08431f7351b793cd5f48f1f515c4ca"}
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.668624 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.863312 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7x6r6\" (UniqueName: \"kubernetes.io/projected/7899bfa9-2025-457b-9c46-194188b7f52e-kube-api-access-7x6r6\") pod \"7899bfa9-2025-457b-9c46-194188b7f52e\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") "
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.863924 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/7899bfa9-2025-457b-9c46-194188b7f52e-test-operator-ephemeral-workdir\") pod \"7899bfa9-2025-457b-9c46-194188b7f52e\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") "
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.864011 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"7899bfa9-2025-457b-9c46-194188b7f52e\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") "
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.864628 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/7899bfa9-2025-457b-9c46-194188b7f52e-test-operator-ephemeral-temporary\") pod \"7899bfa9-2025-457b-9c46-194188b7f52e\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") "
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.864796 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7899bfa9-2025-457b-9c46-194188b7f52e-openstack-config-secret\") pod \"7899bfa9-2025-457b-9c46-194188b7f52e\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") "
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.865233 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7899bfa9-2025-457b-9c46-194188b7f52e-openstack-config\") pod \"7899bfa9-2025-457b-9c46-194188b7f52e\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") "
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.865310 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/7899bfa9-2025-457b-9c46-194188b7f52e-ca-certs\") pod \"7899bfa9-2025-457b-9c46-194188b7f52e\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") "
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.865387 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7899bfa9-2025-457b-9c46-194188b7f52e-ssh-key\") pod \"7899bfa9-2025-457b-9c46-194188b7f52e\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") "
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.865468 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7899bfa9-2025-457b-9c46-194188b7f52e-config-data\") pod \"7899bfa9-2025-457b-9c46-194188b7f52e\" (UID: \"7899bfa9-2025-457b-9c46-194188b7f52e\") "
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.868530 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7899bfa9-2025-457b-9c46-194188b7f52e-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "7899bfa9-2025-457b-9c46-194188b7f52e" (UID: "7899bfa9-2025-457b-9c46-194188b7f52e"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.868944 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7899bfa9-2025-457b-9c46-194188b7f52e-config-data" (OuterVolumeSpecName: "config-data") pod "7899bfa9-2025-457b-9c46-194188b7f52e" (UID: "7899bfa9-2025-457b-9c46-194188b7f52e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.875352 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7899bfa9-2025-457b-9c46-194188b7f52e-kube-api-access-7x6r6" (OuterVolumeSpecName: "kube-api-access-7x6r6") pod "7899bfa9-2025-457b-9c46-194188b7f52e" (UID: "7899bfa9-2025-457b-9c46-194188b7f52e"). InnerVolumeSpecName "kube-api-access-7x6r6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.887077 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "test-operator-logs") pod "7899bfa9-2025-457b-9c46-194188b7f52e" (UID: "7899bfa9-2025-457b-9c46-194188b7f52e"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.893015 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7899bfa9-2025-457b-9c46-194188b7f52e-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "7899bfa9-2025-457b-9c46-194188b7f52e" (UID: "7899bfa9-2025-457b-9c46-194188b7f52e"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.899186 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7899bfa9-2025-457b-9c46-194188b7f52e-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "7899bfa9-2025-457b-9c46-194188b7f52e" (UID: "7899bfa9-2025-457b-9c46-194188b7f52e"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.905286 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7899bfa9-2025-457b-9c46-194188b7f52e-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "7899bfa9-2025-457b-9c46-194188b7f52e" (UID: "7899bfa9-2025-457b-9c46-194188b7f52e"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.910858 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7899bfa9-2025-457b-9c46-194188b7f52e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7899bfa9-2025-457b-9c46-194188b7f52e" (UID: "7899bfa9-2025-457b-9c46-194188b7f52e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.929279 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7899bfa9-2025-457b-9c46-194188b7f52e-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "7899bfa9-2025-457b-9c46-194188b7f52e" (UID: "7899bfa9-2025-457b-9c46-194188b7f52e"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.968402 4838 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7899bfa9-2025-457b-9c46-194188b7f52e-openstack-config\") on node \"crc\" DevicePath \"\""
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.968459 4838 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/7899bfa9-2025-457b-9c46-194188b7f52e-ca-certs\") on node \"crc\" DevicePath \"\""
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.968480 4838 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7899bfa9-2025-457b-9c46-194188b7f52e-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.968496 4838 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7899bfa9-2025-457b-9c46-194188b7f52e-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.968517 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7x6r6\" (UniqueName: \"kubernetes.io/projected/7899bfa9-2025-457b-9c46-194188b7f52e-kube-api-access-7x6r6\") on node \"crc\" DevicePath \"\""
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.968538 4838 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/7899bfa9-2025-457b-9c46-194188b7f52e-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\""
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.968579 4838 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" "
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.968600 4838 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/7899bfa9-2025-457b-9c46-194188b7f52e-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\""
Nov 28 11:06:09 crc kubenswrapper[4838]: I1128 11:06:09.968621 4838 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7899bfa9-2025-457b-9c46-194188b7f52e-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Nov 28 11:06:10 crc kubenswrapper[4838]: I1128 11:06:10.003708 4838 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc"
Nov 28 11:06:10 crc kubenswrapper[4838]: I1128 11:06:10.070404 4838 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\""
Nov 28 11:06:10 crc kubenswrapper[4838]: I1128 11:06:10.313329 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"7899bfa9-2025-457b-9c46-194188b7f52e","Type":"ContainerDied","Data":"334abfa476ed6e421b059067199813c4f98c204b04a440eb1bd18af94eb4b8b1"}
Nov 28 11:06:10 crc kubenswrapper[4838]: I1128 11:06:10.313381 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="334abfa476ed6e421b059067199813c4f98c204b04a440eb1bd18af94eb4b8b1"
Nov 28 11:06:10 crc kubenswrapper[4838]: I1128 11:06:10.313397 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Nov 28 11:06:14 crc kubenswrapper[4838]: I1128 11:06:14.186596 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 28 11:06:14 crc kubenswrapper[4838]: E1128 11:06:14.189671 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="966245dc-0786-41a8-bfd7-0e72e5d1eb2a" containerName="extract-content"
Nov 28 11:06:14 crc kubenswrapper[4838]: I1128 11:06:14.189692 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="966245dc-0786-41a8-bfd7-0e72e5d1eb2a" containerName="extract-content"
Nov 28 11:06:14 crc kubenswrapper[4838]: E1128 11:06:14.189703 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="966245dc-0786-41a8-bfd7-0e72e5d1eb2a" containerName="registry-server"
Nov 28 11:06:14 crc kubenswrapper[4838]: I1128 11:06:14.189709 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="966245dc-0786-41a8-bfd7-0e72e5d1eb2a" containerName="registry-server"
Nov 28 11:06:14 crc kubenswrapper[4838]: E1128 11:06:14.189733 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7899bfa9-2025-457b-9c46-194188b7f52e" containerName="tempest-tests-tempest-tests-runner"
Nov 28 11:06:14 crc kubenswrapper[4838]: I1128 11:06:14.189739 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="7899bfa9-2025-457b-9c46-194188b7f52e" containerName="tempest-tests-tempest-tests-runner"
Nov 28 11:06:14 crc kubenswrapper[4838]: E1128 11:06:14.189768 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="966245dc-0786-41a8-bfd7-0e72e5d1eb2a" containerName="extract-utilities"
Nov 28 11:06:14 crc kubenswrapper[4838]: I1128 11:06:14.189775 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="966245dc-0786-41a8-bfd7-0e72e5d1eb2a" containerName="extract-utilities"
Nov 28 11:06:14 crc kubenswrapper[4838]: I1128 11:06:14.189991 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="966245dc-0786-41a8-bfd7-0e72e5d1eb2a" containerName="registry-server"
Nov 28 11:06:14 crc kubenswrapper[4838]: I1128 11:06:14.190011 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="7899bfa9-2025-457b-9c46-194188b7f52e" containerName="tempest-tests-tempest-tests-runner"
Nov 28 11:06:14 crc kubenswrapper[4838]: I1128 11:06:14.190678 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 28 11:06:14 crc kubenswrapper[4838]: I1128 11:06:14.197099 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-m9l7v"
Nov 28 11:06:14 crc kubenswrapper[4838]: I1128 11:06:14.207957 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 28 11:06:14 crc kubenswrapper[4838]: I1128 11:06:14.367914 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"9d260724-3275-4670-966b-0c961bf54da5\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 28 11:06:14 crc kubenswrapper[4838]: I1128 11:06:14.367985 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tl6n7\" (UniqueName: \"kubernetes.io/projected/9d260724-3275-4670-966b-0c961bf54da5-kube-api-access-tl6n7\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"9d260724-3275-4670-966b-0c961bf54da5\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 28 11:06:14 crc kubenswrapper[4838]: I1128 11:06:14.470678 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"9d260724-3275-4670-966b-0c961bf54da5\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 28 11:06:14 crc kubenswrapper[4838]: I1128 11:06:14.471220 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tl6n7\" (UniqueName: \"kubernetes.io/projected/9d260724-3275-4670-966b-0c961bf54da5-kube-api-access-tl6n7\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"9d260724-3275-4670-966b-0c961bf54da5\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 28 11:06:14 crc kubenswrapper[4838]: I1128 11:06:14.471340 4838 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"9d260724-3275-4670-966b-0c961bf54da5\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 28 11:06:14 crc kubenswrapper[4838]: I1128 11:06:14.514218 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tl6n7\" (UniqueName: \"kubernetes.io/projected/9d260724-3275-4670-966b-0c961bf54da5-kube-api-access-tl6n7\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"9d260724-3275-4670-966b-0c961bf54da5\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 28 11:06:14 crc kubenswrapper[4838]: I1128 11:06:14.518390 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"9d260724-3275-4670-966b-0c961bf54da5\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 28 11:06:14 crc kubenswrapper[4838]: I1128 11:06:14.525675 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 28 11:06:14 crc kubenswrapper[4838]: I1128 11:06:14.898183 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 28 11:06:15 crc kubenswrapper[4838]: I1128 11:06:15.366228 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"9d260724-3275-4670-966b-0c961bf54da5","Type":"ContainerStarted","Data":"7e3feb2bde965803d0615071b5e2f5878050c6b61e9d6741cd78858bf273a2ec"}
Nov 28 11:06:16 crc kubenswrapper[4838]: I1128 11:06:16.378391 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"9d260724-3275-4670-966b-0c961bf54da5","Type":"ContainerStarted","Data":"12abb22486ceaa4fc5245ba7e2f5dbc890d57c6388fe295f14a32d3c201cd95a"}
Nov 28 11:06:16 crc kubenswrapper[4838]: I1128 11:06:16.396618 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=1.466074731 podStartE2EDuration="2.396593627s" podCreationTimestamp="2025-11-28 11:06:14 +0000 UTC" firstStartedPulling="2025-11-28 11:06:14.897072914 +0000 UTC m=+4146.596047084" lastFinishedPulling="2025-11-28 11:06:15.8275918 +0000 UTC m=+4147.526565980" observedRunningTime="2025-11-28 11:06:16.394572011 +0000 UTC m=+4148.093546221" watchObservedRunningTime="2025-11-28 11:06:16.396593627 +0000 UTC m=+4148.095567827"
Nov 28 11:06:22 crc kubenswrapper[4838]: I1128 11:06:22.562882 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb"
Nov 28 11:06:22 crc kubenswrapper[4838]: E1128 11:06:22.563676 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 11:06:37 crc kubenswrapper[4838]: I1128 11:06:37.562123 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb"
Nov 28 11:06:37 crc kubenswrapper[4838]: E1128 11:06:37.564158 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 11:06:40 crc kubenswrapper[4838]: I1128 11:06:40.763437 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-7sfv7/must-gather-fhhqh"]
Nov 28 11:06:40 crc kubenswrapper[4838]: I1128 11:06:40.765222 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-7sfv7/must-gather-fhhqh"
Nov 28 11:06:40 crc kubenswrapper[4838]: I1128 11:06:40.768372 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-7sfv7"/"default-dockercfg-xxpqm"
Nov 28 11:06:40 crc kubenswrapper[4838]: I1128 11:06:40.768413 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-7sfv7"/"openshift-service-ca.crt"
Nov 28 11:06:40 crc kubenswrapper[4838]: I1128 11:06:40.768609 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-7sfv7"/"kube-root-ca.crt"
Nov 28 11:06:40 crc kubenswrapper[4838]: I1128 11:06:40.790968 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-7sfv7/must-gather-fhhqh"]
Nov 28 11:06:40 crc kubenswrapper[4838]: I1128 11:06:40.963117 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/9a7f2e13-2522-459b-b286-862bd963b3c0-must-gather-output\") pod \"must-gather-fhhqh\" (UID: \"9a7f2e13-2522-459b-b286-862bd963b3c0\") " pod="openshift-must-gather-7sfv7/must-gather-fhhqh"
Nov 28 11:06:40 crc kubenswrapper[4838]: I1128 11:06:40.963193 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khrpr\" (UniqueName: \"kubernetes.io/projected/9a7f2e13-2522-459b-b286-862bd963b3c0-kube-api-access-khrpr\") pod \"must-gather-fhhqh\" (UID: \"9a7f2e13-2522-459b-b286-862bd963b3c0\") " pod="openshift-must-gather-7sfv7/must-gather-fhhqh"
Nov 28 11:06:41 crc kubenswrapper[4838]: I1128 11:06:41.065438 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khrpr\" (UniqueName: \"kubernetes.io/projected/9a7f2e13-2522-459b-b286-862bd963b3c0-kube-api-access-khrpr\") pod \"must-gather-fhhqh\" (UID: \"9a7f2e13-2522-459b-b286-862bd963b3c0\") " pod="openshift-must-gather-7sfv7/must-gather-fhhqh"
Nov 28 11:06:41 crc kubenswrapper[4838]: I1128 11:06:41.065620 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/9a7f2e13-2522-459b-b286-862bd963b3c0-must-gather-output\") pod \"must-gather-fhhqh\" (UID: \"9a7f2e13-2522-459b-b286-862bd963b3c0\") " pod="openshift-must-gather-7sfv7/must-gather-fhhqh"
Nov 28 11:06:41 crc kubenswrapper[4838]: I1128 11:06:41.066029 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/9a7f2e13-2522-459b-b286-862bd963b3c0-must-gather-output\") pod \"must-gather-fhhqh\" (UID: \"9a7f2e13-2522-459b-b286-862bd963b3c0\") " pod="openshift-must-gather-7sfv7/must-gather-fhhqh"
Nov 28 11:06:41 crc kubenswrapper[4838]: I1128 11:06:41.089660 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khrpr\" (UniqueName: \"kubernetes.io/projected/9a7f2e13-2522-459b-b286-862bd963b3c0-kube-api-access-khrpr\") pod \"must-gather-fhhqh\" (UID: \"9a7f2e13-2522-459b-b286-862bd963b3c0\") " pod="openshift-must-gather-7sfv7/must-gather-fhhqh"
Nov 28 11:06:41 crc kubenswrapper[4838]: I1128 11:06:41.387382 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-7sfv7/must-gather-fhhqh"
Nov 28 11:06:41 crc kubenswrapper[4838]: I1128 11:06:41.885548 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-7sfv7/must-gather-fhhqh"]
Nov 28 11:06:41 crc kubenswrapper[4838]: W1128 11:06:41.892728 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9a7f2e13_2522_459b_b286_862bd963b3c0.slice/crio-fadb623cf31c77e11b9e03fcb71340fb748e9a39017bdc420c6451565d3db405 WatchSource:0}: Error finding container fadb623cf31c77e11b9e03fcb71340fb748e9a39017bdc420c6451565d3db405: Status 404 returned error can't find the container with id fadb623cf31c77e11b9e03fcb71340fb748e9a39017bdc420c6451565d3db405
Nov 28 11:06:42 crc kubenswrapper[4838]: I1128 11:06:42.750469 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7sfv7/must-gather-fhhqh" event={"ID":"9a7f2e13-2522-459b-b286-862bd963b3c0","Type":"ContainerStarted","Data":"fadb623cf31c77e11b9e03fcb71340fb748e9a39017bdc420c6451565d3db405"}
Nov 28 11:06:48 crc kubenswrapper[4838]: I1128 11:06:48.568128 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb"
Nov 28 11:06:48 crc kubenswrapper[4838]: E1128 11:06:48.568921 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 11:06:48 crc kubenswrapper[4838]: I1128 11:06:48.849694 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7sfv7/must-gather-fhhqh" event={"ID":"9a7f2e13-2522-459b-b286-862bd963b3c0","Type":"ContainerStarted","Data":"b77b217163030e413da14c958372fdd9664d8625b8ecb50d337c048a42da2f09"}
Nov 28 11:06:48 crc kubenswrapper[4838]: I1128 11:06:48.849768 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7sfv7/must-gather-fhhqh" event={"ID":"9a7f2e13-2522-459b-b286-862bd963b3c0","Type":"ContainerStarted","Data":"6631c11dec99d433705087191b0248fb087bdddde75c680f57e54cba44ac87f5"}
Nov 28 11:06:48 crc kubenswrapper[4838]: I1128 11:06:48.875817 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-7sfv7/must-gather-fhhqh" podStartSLOduration=2.713423933 podStartE2EDuration="8.875799991s" podCreationTimestamp="2025-11-28 11:06:40 +0000 UTC" firstStartedPulling="2025-11-28 11:06:41.893755114 +0000 UTC m=+4173.592729284" lastFinishedPulling="2025-11-28 11:06:48.056131182 +0000 UTC m=+4179.755105342" observedRunningTime="2025-11-28 11:06:48.874697741 +0000 UTC m=+4180.573671931" watchObservedRunningTime="2025-11-28 11:06:48.875799991 +0000 UTC m=+4180.574774161"
Nov 28 11:06:52 crc kubenswrapper[4838]: I1128 11:06:52.299123 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-7sfv7/crc-debug-nk87t"]
Nov 28 11:06:52 crc kubenswrapper[4838]: I1128 11:06:52.301876 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-7sfv7/crc-debug-nk87t"
Nov 28 11:06:52 crc kubenswrapper[4838]: I1128 11:06:52.441092 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0ad0264f-ba94-46c5-b60c-d113c32c361c-host\") pod \"crc-debug-nk87t\" (UID: \"0ad0264f-ba94-46c5-b60c-d113c32c361c\") " pod="openshift-must-gather-7sfv7/crc-debug-nk87t"
Nov 28 11:06:52 crc kubenswrapper[4838]: I1128 11:06:52.441196 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77kdn\" (UniqueName: \"kubernetes.io/projected/0ad0264f-ba94-46c5-b60c-d113c32c361c-kube-api-access-77kdn\") pod \"crc-debug-nk87t\" (UID: \"0ad0264f-ba94-46c5-b60c-d113c32c361c\") " pod="openshift-must-gather-7sfv7/crc-debug-nk87t"
Nov 28 11:06:52 crc kubenswrapper[4838]: I1128 11:06:52.543611 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77kdn\" (UniqueName: \"kubernetes.io/projected/0ad0264f-ba94-46c5-b60c-d113c32c361c-kube-api-access-77kdn\") pod \"crc-debug-nk87t\" (UID: \"0ad0264f-ba94-46c5-b60c-d113c32c361c\") " pod="openshift-must-gather-7sfv7/crc-debug-nk87t"
Nov 28 11:06:52 crc kubenswrapper[4838]: I1128 11:06:52.544164 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0ad0264f-ba94-46c5-b60c-d113c32c361c-host\") pod \"crc-debug-nk87t\" (UID: \"0ad0264f-ba94-46c5-b60c-d113c32c361c\") " pod="openshift-must-gather-7sfv7/crc-debug-nk87t"
Nov 28 11:06:52 crc kubenswrapper[4838]: I1128 11:06:52.544271 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0ad0264f-ba94-46c5-b60c-d113c32c361c-host\") pod \"crc-debug-nk87t\" (UID: \"0ad0264f-ba94-46c5-b60c-d113c32c361c\") " pod="openshift-must-gather-7sfv7/crc-debug-nk87t"
Nov 28 11:06:52 crc kubenswrapper[4838]: I1128 11:06:52.567701 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77kdn\" (UniqueName: \"kubernetes.io/projected/0ad0264f-ba94-46c5-b60c-d113c32c361c-kube-api-access-77kdn\") pod \"crc-debug-nk87t\" (UID: \"0ad0264f-ba94-46c5-b60c-d113c32c361c\") " pod="openshift-must-gather-7sfv7/crc-debug-nk87t"
Nov 28 11:06:52 crc kubenswrapper[4838]: I1128 11:06:52.620109 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-7sfv7/crc-debug-nk87t"
Nov 28 11:06:52 crc kubenswrapper[4838]: W1128 11:06:52.673743 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0ad0264f_ba94_46c5_b60c_d113c32c361c.slice/crio-1cd05bb65541366b08cd265dc4e612140a0e975705100208a6879c6981b2bdbc WatchSource:0}: Error finding container 1cd05bb65541366b08cd265dc4e612140a0e975705100208a6879c6981b2bdbc: Status 404 returned error can't find the container with id 1cd05bb65541366b08cd265dc4e612140a0e975705100208a6879c6981b2bdbc
Nov 28 11:06:52 crc kubenswrapper[4838]: I1128 11:06:52.890128 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7sfv7/crc-debug-nk87t" event={"ID":"0ad0264f-ba94-46c5-b60c-d113c32c361c","Type":"ContainerStarted","Data":"1cd05bb65541366b08cd265dc4e612140a0e975705100208a6879c6981b2bdbc"}
Nov 28 11:07:02 crc kubenswrapper[4838]: I1128 11:07:02.978617 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7sfv7/crc-debug-nk87t" event={"ID":"0ad0264f-ba94-46c5-b60c-d113c32c361c","Type":"ContainerStarted","Data":"c7408febbdb6fa366fe943a61a93036b19c8c2e8cfa85d1af0d7b0d2e56fefce"}
Nov 28 11:07:02 crc kubenswrapper[4838]: I1128 11:07:02.991606 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-7sfv7/crc-debug-nk87t" podStartSLOduration=1.2695876959999999 podStartE2EDuration="10.991590316s" podCreationTimestamp="2025-11-28 11:06:52 +0000 UTC" firstStartedPulling="2025-11-28 11:06:52.676426402 +0000 UTC m=+4184.375400592" lastFinishedPulling="2025-11-28 11:07:02.398429042 +0000 UTC m=+4194.097403212" observedRunningTime="2025-11-28 11:07:02.989989062 +0000 UTC m=+4194.688963232" watchObservedRunningTime="2025-11-28 11:07:02.991590316 +0000 UTC m=+4194.690564486"
Nov 28 11:07:03 crc kubenswrapper[4838]: I1128 11:07:03.562895 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb"
Nov 28 11:07:03 crc kubenswrapper[4838]: I1128 11:07:03.993312 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerStarted","Data":"6deb95eccfb366ee3b0f8143f9aea63dc132d0fd50914fed82f0caf61f74a268"}
Nov 28 11:07:47 crc kubenswrapper[4838]: I1128 11:07:47.425579 4838 generic.go:334] "Generic (PLEG): container finished" podID="0ad0264f-ba94-46c5-b60c-d113c32c361c" containerID="c7408febbdb6fa366fe943a61a93036b19c8c2e8cfa85d1af0d7b0d2e56fefce" exitCode=0
Nov 28 11:07:47 crc kubenswrapper[4838]: I1128 11:07:47.426326 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7sfv7/crc-debug-nk87t" event={"ID":"0ad0264f-ba94-46c5-b60c-d113c32c361c","Type":"ContainerDied","Data":"c7408febbdb6fa366fe943a61a93036b19c8c2e8cfa85d1af0d7b0d2e56fefce"}
Nov 28 11:07:48 crc kubenswrapper[4838]: I1128 11:07:48.641334 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-7sfv7/crc-debug-nk87t"
Nov 28 11:07:48 crc kubenswrapper[4838]: I1128 11:07:48.712474 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0ad0264f-ba94-46c5-b60c-d113c32c361c-host\") pod \"0ad0264f-ba94-46c5-b60c-d113c32c361c\" (UID: \"0ad0264f-ba94-46c5-b60c-d113c32c361c\") "
Nov 28 11:07:48 crc kubenswrapper[4838]: I1128 11:07:48.712587 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0ad0264f-ba94-46c5-b60c-d113c32c361c-host" (OuterVolumeSpecName: "host") pod "0ad0264f-ba94-46c5-b60c-d113c32c361c" (UID: "0ad0264f-ba94-46c5-b60c-d113c32c361c"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 11:07:48 crc kubenswrapper[4838]: I1128 11:07:48.712681 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-77kdn\" (UniqueName: \"kubernetes.io/projected/0ad0264f-ba94-46c5-b60c-d113c32c361c-kube-api-access-77kdn\") pod \"0ad0264f-ba94-46c5-b60c-d113c32c361c\" (UID: \"0ad0264f-ba94-46c5-b60c-d113c32c361c\") "
Nov 28 11:07:48 crc kubenswrapper[4838]: I1128 11:07:48.713108 4838 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0ad0264f-ba94-46c5-b60c-d113c32c361c-host\") on node \"crc\" DevicePath \"\""
Nov 28 11:07:48 crc kubenswrapper[4838]: I1128 11:07:48.716471 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-7sfv7/crc-debug-nk87t"]
Nov 28 11:07:48 crc kubenswrapper[4838]: I1128 11:07:48.730928 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-7sfv7/crc-debug-nk87t"]
Nov 28 11:07:49 crc kubenswrapper[4838]: I1128 11:07:49.332031 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ad0264f-ba94-46c5-b60c-d113c32c361c-kube-api-access-77kdn" (OuterVolumeSpecName: "kube-api-access-77kdn") pod "0ad0264f-ba94-46c5-b60c-d113c32c361c" (UID: "0ad0264f-ba94-46c5-b60c-d113c32c361c"). InnerVolumeSpecName "kube-api-access-77kdn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:07:49 crc kubenswrapper[4838]: I1128 11:07:49.428895 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-77kdn\" (UniqueName: \"kubernetes.io/projected/0ad0264f-ba94-46c5-b60c-d113c32c361c-kube-api-access-77kdn\") on node \"crc\" DevicePath \"\""
Nov 28 11:07:49 crc kubenswrapper[4838]: I1128 11:07:49.448261 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1cd05bb65541366b08cd265dc4e612140a0e975705100208a6879c6981b2bdbc"
Nov 28 11:07:49 crc kubenswrapper[4838]: I1128 11:07:49.448328 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-7sfv7/crc-debug-nk87t"
Nov 28 11:07:50 crc kubenswrapper[4838]: I1128 11:07:50.581237 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ad0264f-ba94-46c5-b60c-d113c32c361c" path="/var/lib/kubelet/pods/0ad0264f-ba94-46c5-b60c-d113c32c361c/volumes"
Nov 28 11:07:50 crc kubenswrapper[4838]: I1128 11:07:50.582677 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-7sfv7/crc-debug-lgvhh"]
Nov 28 11:07:50 crc kubenswrapper[4838]: E1128 11:07:50.583204 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ad0264f-ba94-46c5-b60c-d113c32c361c" containerName="container-00"
Nov 28 11:07:50 crc kubenswrapper[4838]: I1128 11:07:50.583224 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ad0264f-ba94-46c5-b60c-d113c32c361c" containerName="container-00"
Nov 28 11:07:50 crc kubenswrapper[4838]: I1128 11:07:50.584124 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ad0264f-ba94-46c5-b60c-d113c32c361c" containerName="container-00"
Nov 28 11:07:50 crc kubenswrapper[4838]: I1128 11:07:50.585295 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-7sfv7/crc-debug-lgvhh"
Nov 28 11:07:50 crc kubenswrapper[4838]: I1128 11:07:50.664965 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/028e1fed-6619-44be-9fe4-bcb41d8d27e7-host\") pod \"crc-debug-lgvhh\" (UID: \"028e1fed-6619-44be-9fe4-bcb41d8d27e7\") " pod="openshift-must-gather-7sfv7/crc-debug-lgvhh"
Nov 28 11:07:50 crc kubenswrapper[4838]: I1128 11:07:50.665062 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wz6wq\" (UniqueName: \"kubernetes.io/projected/028e1fed-6619-44be-9fe4-bcb41d8d27e7-kube-api-access-wz6wq\") pod \"crc-debug-lgvhh\" (UID: \"028e1fed-6619-44be-9fe4-bcb41d8d27e7\") " pod="openshift-must-gather-7sfv7/crc-debug-lgvhh"
Nov 28 11:07:50 crc kubenswrapper[4838]: I1128 11:07:50.766985 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/028e1fed-6619-44be-9fe4-bcb41d8d27e7-host\") pod \"crc-debug-lgvhh\" (UID: \"028e1fed-6619-44be-9fe4-bcb41d8d27e7\") " pod="openshift-must-gather-7sfv7/crc-debug-lgvhh"
Nov 28 11:07:50 crc kubenswrapper[4838]: I1128 11:07:50.767172 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/028e1fed-6619-44be-9fe4-bcb41d8d27e7-host\") pod \"crc-debug-lgvhh\" (UID: \"028e1fed-6619-44be-9fe4-bcb41d8d27e7\") " pod="openshift-must-gather-7sfv7/crc-debug-lgvhh"
Nov 28 11:07:50 crc kubenswrapper[4838]: I1128 11:07:50.767372 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wz6wq\" (UniqueName: \"kubernetes.io/projected/028e1fed-6619-44be-9fe4-bcb41d8d27e7-kube-api-access-wz6wq\") pod \"crc-debug-lgvhh\" (UID: \"028e1fed-6619-44be-9fe4-bcb41d8d27e7\") " pod="openshift-must-gather-7sfv7/crc-debug-lgvhh"
Nov 28 11:07:50 crc kubenswrapper[4838]: I1128 11:07:50.793324 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wz6wq\" (UniqueName: \"kubernetes.io/projected/028e1fed-6619-44be-9fe4-bcb41d8d27e7-kube-api-access-wz6wq\") pod \"crc-debug-lgvhh\" (UID: \"028e1fed-6619-44be-9fe4-bcb41d8d27e7\") " pod="openshift-must-gather-7sfv7/crc-debug-lgvhh"
Nov 28 11:07:50 crc kubenswrapper[4838]: I1128 11:07:50.913325 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-7sfv7/crc-debug-lgvhh"
Nov 28 11:07:50 crc kubenswrapper[4838]: W1128 11:07:50.951451 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod028e1fed_6619_44be_9fe4_bcb41d8d27e7.slice/crio-a6e36f81e13ff86bc2bc29c6e15be7c72e6cba629dee8483adfe70551b702473 WatchSource:0}: Error finding container a6e36f81e13ff86bc2bc29c6e15be7c72e6cba629dee8483adfe70551b702473: Status 404 returned error can't find the container with id a6e36f81e13ff86bc2bc29c6e15be7c72e6cba629dee8483adfe70551b702473
Nov 28 11:07:51 crc kubenswrapper[4838]: I1128 11:07:51.476774 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7sfv7/crc-debug-lgvhh" event={"ID":"028e1fed-6619-44be-9fe4-bcb41d8d27e7","Type":"ContainerStarted","Data":"a6e36f81e13ff86bc2bc29c6e15be7c72e6cba629dee8483adfe70551b702473"}
Nov 28 11:07:52 crc kubenswrapper[4838]: I1128 11:07:52.498290 4838 generic.go:334] "Generic (PLEG): container finished" podID="028e1fed-6619-44be-9fe4-bcb41d8d27e7" containerID="6d1319f9e3f708b9911f59a71b6ec8210be5630e1569647e6ef6f76b8273bf90" exitCode=0
Nov 28 11:07:52 crc kubenswrapper[4838]: I1128 11:07:52.498400 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7sfv7/crc-debug-lgvhh" event={"ID":"028e1fed-6619-44be-9fe4-bcb41d8d27e7","Type":"ContainerDied","Data":"6d1319f9e3f708b9911f59a71b6ec8210be5630e1569647e6ef6f76b8273bf90"}
Nov 28 11:07:53 crc kubenswrapper[4838]: I1128 11:07:53.618397 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-7sfv7/crc-debug-lgvhh"
Nov 28 11:07:53 crc kubenswrapper[4838]: I1128 11:07:53.725702 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wz6wq\" (UniqueName: \"kubernetes.io/projected/028e1fed-6619-44be-9fe4-bcb41d8d27e7-kube-api-access-wz6wq\") pod \"028e1fed-6619-44be-9fe4-bcb41d8d27e7\" (UID: \"028e1fed-6619-44be-9fe4-bcb41d8d27e7\") "
Nov 28 11:07:53 crc kubenswrapper[4838]: I1128 11:07:53.725939 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/028e1fed-6619-44be-9fe4-bcb41d8d27e7-host\") pod \"028e1fed-6619-44be-9fe4-bcb41d8d27e7\" (UID: \"028e1fed-6619-44be-9fe4-bcb41d8d27e7\") "
Nov 28 11:07:53 crc kubenswrapper[4838]: I1128 11:07:53.726076 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/028e1fed-6619-44be-9fe4-bcb41d8d27e7-host" (OuterVolumeSpecName: "host") pod "028e1fed-6619-44be-9fe4-bcb41d8d27e7" (UID: "028e1fed-6619-44be-9fe4-bcb41d8d27e7"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 11:07:53 crc kubenswrapper[4838]: I1128 11:07:53.726689 4838 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/028e1fed-6619-44be-9fe4-bcb41d8d27e7-host\") on node \"crc\" DevicePath \"\""
Nov 28 11:07:53 crc kubenswrapper[4838]: I1128 11:07:53.735195 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/028e1fed-6619-44be-9fe4-bcb41d8d27e7-kube-api-access-wz6wq" (OuterVolumeSpecName: "kube-api-access-wz6wq") pod "028e1fed-6619-44be-9fe4-bcb41d8d27e7" (UID: "028e1fed-6619-44be-9fe4-bcb41d8d27e7"). InnerVolumeSpecName "kube-api-access-wz6wq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:07:53 crc kubenswrapper[4838]: I1128 11:07:53.828473 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wz6wq\" (UniqueName: \"kubernetes.io/projected/028e1fed-6619-44be-9fe4-bcb41d8d27e7-kube-api-access-wz6wq\") on node \"crc\" DevicePath \"\""
Nov 28 11:07:54 crc kubenswrapper[4838]: I1128 11:07:54.518254 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7sfv7/crc-debug-lgvhh" event={"ID":"028e1fed-6619-44be-9fe4-bcb41d8d27e7","Type":"ContainerDied","Data":"a6e36f81e13ff86bc2bc29c6e15be7c72e6cba629dee8483adfe70551b702473"}
Nov 28 11:07:54 crc kubenswrapper[4838]: I1128 11:07:54.518296 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-7sfv7/crc-debug-lgvhh"
Nov 28 11:07:54 crc kubenswrapper[4838]: I1128 11:07:54.518305 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a6e36f81e13ff86bc2bc29c6e15be7c72e6cba629dee8483adfe70551b702473"
Nov 28 11:07:54 crc kubenswrapper[4838]: I1128 11:07:54.551739 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-7sfv7/crc-debug-lgvhh"]
Nov 28 11:07:54 crc kubenswrapper[4838]: I1128 11:07:54.560026 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-7sfv7/crc-debug-lgvhh"]
Nov 28 11:07:54 crc kubenswrapper[4838]: I1128 11:07:54.573540 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="028e1fed-6619-44be-9fe4-bcb41d8d27e7" path="/var/lib/kubelet/pods/028e1fed-6619-44be-9fe4-bcb41d8d27e7/volumes"
Nov 28 11:07:55 crc kubenswrapper[4838]: I1128 11:07:55.731429 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-7sfv7/crc-debug-d22c5"]
Nov 28 11:07:55 crc kubenswrapper[4838]: E1128 11:07:55.731834 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="028e1fed-6619-44be-9fe4-bcb41d8d27e7" containerName="container-00"
Nov 28 11:07:55 crc kubenswrapper[4838]: I1128 11:07:55.731846 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="028e1fed-6619-44be-9fe4-bcb41d8d27e7" containerName="container-00"
Nov 28 11:07:55 crc kubenswrapper[4838]: I1128 11:07:55.732047 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="028e1fed-6619-44be-9fe4-bcb41d8d27e7" containerName="container-00"
Nov 28 11:07:55 crc kubenswrapper[4838]: I1128 11:07:55.732656 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-7sfv7/crc-debug-d22c5"
Nov 28 11:07:55 crc kubenswrapper[4838]: I1128 11:07:55.874745 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/371702db-cfa9-469a-b1cb-17eb5aebdada-host\") pod \"crc-debug-d22c5\" (UID: \"371702db-cfa9-469a-b1cb-17eb5aebdada\") " pod="openshift-must-gather-7sfv7/crc-debug-d22c5"
Nov 28 11:07:55 crc kubenswrapper[4838]: I1128 11:07:55.875271 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kn96\" (UniqueName: \"kubernetes.io/projected/371702db-cfa9-469a-b1cb-17eb5aebdada-kube-api-access-2kn96\") pod \"crc-debug-d22c5\" (UID: \"371702db-cfa9-469a-b1cb-17eb5aebdada\") " pod="openshift-must-gather-7sfv7/crc-debug-d22c5"
Nov 28 11:07:55 crc kubenswrapper[4838]: I1128 11:07:55.979651 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kn96\" (UniqueName: \"kubernetes.io/projected/371702db-cfa9-469a-b1cb-17eb5aebdada-kube-api-access-2kn96\") pod \"crc-debug-d22c5\" (UID: \"371702db-cfa9-469a-b1cb-17eb5aebdada\") " pod="openshift-must-gather-7sfv7/crc-debug-d22c5"
Nov 28 11:07:55 crc kubenswrapper[4838]: I1128 11:07:55.980046 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/371702db-cfa9-469a-b1cb-17eb5aebdada-host\") pod \"crc-debug-d22c5\" (UID: \"371702db-cfa9-469a-b1cb-17eb5aebdada\") " pod="openshift-must-gather-7sfv7/crc-debug-d22c5"
Nov 28 11:07:55 crc kubenswrapper[4838]: I1128 11:07:55.980217 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/371702db-cfa9-469a-b1cb-17eb5aebdada-host\") pod \"crc-debug-d22c5\" (UID: \"371702db-cfa9-469a-b1cb-17eb5aebdada\") " pod="openshift-must-gather-7sfv7/crc-debug-d22c5"
Nov 28 11:07:56 crc kubenswrapper[4838]: I1128 11:07:56.015093 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kn96\" (UniqueName: \"kubernetes.io/projected/371702db-cfa9-469a-b1cb-17eb5aebdada-kube-api-access-2kn96\") pod \"crc-debug-d22c5\" (UID: \"371702db-cfa9-469a-b1cb-17eb5aebdada\") " pod="openshift-must-gather-7sfv7/crc-debug-d22c5"
Nov 28 11:07:56 crc kubenswrapper[4838]: I1128 11:07:56.051466 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-7sfv7/crc-debug-d22c5"
Nov 28 11:07:56 crc kubenswrapper[4838]: W1128 11:07:56.098761 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod371702db_cfa9_469a_b1cb_17eb5aebdada.slice/crio-1eec9a85f7f953c17b4936550f504c8bb83739284e33877e38791f0816dea7b0 WatchSource:0}: Error finding container 1eec9a85f7f953c17b4936550f504c8bb83739284e33877e38791f0816dea7b0: Status 404 returned error can't find the container with id 1eec9a85f7f953c17b4936550f504c8bb83739284e33877e38791f0816dea7b0
Nov 28 11:07:56 crc kubenswrapper[4838]: I1128 11:07:56.149152 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qjjqb"]
Nov 28 11:07:56 crc kubenswrapper[4838]: I1128 11:07:56.151133 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qjjqb"
Nov 28 11:07:56 crc kubenswrapper[4838]: I1128 11:07:56.170782 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qjjqb"]
Nov 28 11:07:56 crc kubenswrapper[4838]: I1128 11:07:56.288674 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a453c56f-0a7e-413d-b0e0-38245cc7c304-catalog-content\") pod \"community-operators-qjjqb\" (UID: \"a453c56f-0a7e-413d-b0e0-38245cc7c304\") " pod="openshift-marketplace/community-operators-qjjqb"
Nov 28 11:07:56 crc kubenswrapper[4838]: I1128 11:07:56.289043 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a453c56f-0a7e-413d-b0e0-38245cc7c304-utilities\") pod \"community-operators-qjjqb\" (UID: \"a453c56f-0a7e-413d-b0e0-38245cc7c304\") " pod="openshift-marketplace/community-operators-qjjqb"
Nov 28 11:07:56 crc kubenswrapper[4838]: I1128 11:07:56.289124 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4nhxz\" (UniqueName: \"kubernetes.io/projected/a453c56f-0a7e-413d-b0e0-38245cc7c304-kube-api-access-4nhxz\") pod \"community-operators-qjjqb\" (UID: \"a453c56f-0a7e-413d-b0e0-38245cc7c304\") " pod="openshift-marketplace/community-operators-qjjqb"
Nov 28 11:07:56 crc kubenswrapper[4838]: I1128 11:07:56.390989 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a453c56f-0a7e-413d-b0e0-38245cc7c304-catalog-content\") pod \"community-operators-qjjqb\" (UID: \"a453c56f-0a7e-413d-b0e0-38245cc7c304\") " pod="openshift-marketplace/community-operators-qjjqb"
Nov 28 11:07:56 crc kubenswrapper[4838]: I1128 11:07:56.391043 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a453c56f-0a7e-413d-b0e0-38245cc7c304-utilities\") pod \"community-operators-qjjqb\" (UID: \"a453c56f-0a7e-413d-b0e0-38245cc7c304\") " pod="openshift-marketplace/community-operators-qjjqb"
Nov 28 11:07:56 crc kubenswrapper[4838]: I1128 11:07:56.391135 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4nhxz\" (UniqueName: \"kubernetes.io/projected/a453c56f-0a7e-413d-b0e0-38245cc7c304-kube-api-access-4nhxz\") pod \"community-operators-qjjqb\" (UID: \"a453c56f-0a7e-413d-b0e0-38245cc7c304\") " pod="openshift-marketplace/community-operators-qjjqb"
Nov 28 11:07:56 crc kubenswrapper[4838]: I1128 11:07:56.391466 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a453c56f-0a7e-413d-b0e0-38245cc7c304-catalog-content\") pod \"community-operators-qjjqb\" (UID: \"a453c56f-0a7e-413d-b0e0-38245cc7c304\") " pod="openshift-marketplace/community-operators-qjjqb"
Nov 28 11:07:56 crc kubenswrapper[4838]: I1128 11:07:56.391739 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a453c56f-0a7e-413d-b0e0-38245cc7c304-utilities\") pod \"community-operators-qjjqb\" (UID: \"a453c56f-0a7e-413d-b0e0-38245cc7c304\") " pod="openshift-marketplace/community-operators-qjjqb"
Nov 28 11:07:56 crc kubenswrapper[4838]: I1128 11:07:56.417957 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4nhxz\" (UniqueName: \"kubernetes.io/projected/a453c56f-0a7e-413d-b0e0-38245cc7c304-kube-api-access-4nhxz\") pod \"community-operators-qjjqb\" (UID: \"a453c56f-0a7e-413d-b0e0-38245cc7c304\") " pod="openshift-marketplace/community-operators-qjjqb"
Nov 28 11:07:56 crc kubenswrapper[4838]: I1128 11:07:56.551963 4838 generic.go:334] "Generic (PLEG): container finished" podID="371702db-cfa9-469a-b1cb-17eb5aebdada" containerID="ed01ad4e47ba17d0f8d35f962d89a1a163edadd076419c87c99db1c019e7fda9" exitCode=0
Nov 28 11:07:56 crc kubenswrapper[4838]: I1128 11:07:56.552013 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7sfv7/crc-debug-d22c5" event={"ID":"371702db-cfa9-469a-b1cb-17eb5aebdada","Type":"ContainerDied","Data":"ed01ad4e47ba17d0f8d35f962d89a1a163edadd076419c87c99db1c019e7fda9"}
Nov 28 11:07:56 crc kubenswrapper[4838]: I1128 11:07:56.552241 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7sfv7/crc-debug-d22c5" event={"ID":"371702db-cfa9-469a-b1cb-17eb5aebdada","Type":"ContainerStarted","Data":"1eec9a85f7f953c17b4936550f504c8bb83739284e33877e38791f0816dea7b0"}
Nov 28 11:07:56 crc kubenswrapper[4838]: I1128 11:07:56.584197 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qjjqb"
Nov 28 11:07:56 crc kubenswrapper[4838]: I1128 11:07:56.608705 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-7sfv7/crc-debug-d22c5"]
Nov 28 11:07:56 crc kubenswrapper[4838]: I1128 11:07:56.641057 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-7sfv7/crc-debug-d22c5"]
Nov 28 11:07:57 crc kubenswrapper[4838]: W1128 11:07:57.070261 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda453c56f_0a7e_413d_b0e0_38245cc7c304.slice/crio-06e097ca5b0488f87102de7a8602a279b35e074056e06f4383d3b3541fcb61fc WatchSource:0}: Error finding container 06e097ca5b0488f87102de7a8602a279b35e074056e06f4383d3b3541fcb61fc: Status 404 returned error can't find the container with id 06e097ca5b0488f87102de7a8602a279b35e074056e06f4383d3b3541fcb61fc
Nov 28 11:07:57 crc kubenswrapper[4838]: I1128 11:07:57.074236 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qjjqb"]
Nov 28 11:07:57 crc kubenswrapper[4838]: I1128 11:07:57.563953 4838 generic.go:334] "Generic (PLEG): container finished" podID="a453c56f-0a7e-413d-b0e0-38245cc7c304" containerID="a8ed339df790ffd0a251db6d6768f75891fd929a2c40812d3bb68cfc696a6dd8" exitCode=0
Nov 28 11:07:57 crc kubenswrapper[4838]: I1128 11:07:57.564577 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qjjqb" event={"ID":"a453c56f-0a7e-413d-b0e0-38245cc7c304","Type":"ContainerDied","Data":"a8ed339df790ffd0a251db6d6768f75891fd929a2c40812d3bb68cfc696a6dd8"}
Nov 28 11:07:57 crc kubenswrapper[4838]: I1128 11:07:57.564634 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qjjqb" event={"ID":"a453c56f-0a7e-413d-b0e0-38245cc7c304","Type":"ContainerStarted","Data":"06e097ca5b0488f87102de7a8602a279b35e074056e06f4383d3b3541fcb61fc"}
Nov 28 11:07:57 crc kubenswrapper[4838]: I1128 11:07:57.680623 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-7sfv7/crc-debug-d22c5"
Nov 28 11:07:57 crc kubenswrapper[4838]: I1128 11:07:57.837682 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2kn96\" (UniqueName: \"kubernetes.io/projected/371702db-cfa9-469a-b1cb-17eb5aebdada-kube-api-access-2kn96\") pod \"371702db-cfa9-469a-b1cb-17eb5aebdada\" (UID: \"371702db-cfa9-469a-b1cb-17eb5aebdada\") "
Nov 28 11:07:57 crc kubenswrapper[4838]: I1128 11:07:57.837820 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/371702db-cfa9-469a-b1cb-17eb5aebdada-host\") pod \"371702db-cfa9-469a-b1cb-17eb5aebdada\" (UID: \"371702db-cfa9-469a-b1cb-17eb5aebdada\") "
Nov 28 11:07:57 crc kubenswrapper[4838]: I1128 11:07:57.838948 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/371702db-cfa9-469a-b1cb-17eb5aebdada-host" (OuterVolumeSpecName: "host") pod "371702db-cfa9-469a-b1cb-17eb5aebdada" (UID: "371702db-cfa9-469a-b1cb-17eb5aebdada"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 11:07:57 crc kubenswrapper[4838]: I1128 11:07:57.846791 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/371702db-cfa9-469a-b1cb-17eb5aebdada-kube-api-access-2kn96" (OuterVolumeSpecName: "kube-api-access-2kn96") pod "371702db-cfa9-469a-b1cb-17eb5aebdada" (UID: "371702db-cfa9-469a-b1cb-17eb5aebdada"). InnerVolumeSpecName "kube-api-access-2kn96". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 11:07:57 crc kubenswrapper[4838]: I1128 11:07:57.940603 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2kn96\" (UniqueName: \"kubernetes.io/projected/371702db-cfa9-469a-b1cb-17eb5aebdada-kube-api-access-2kn96\") on node \"crc\" DevicePath \"\""
Nov 28 11:07:57 crc kubenswrapper[4838]: I1128 11:07:57.940637 4838 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/371702db-cfa9-469a-b1cb-17eb5aebdada-host\") on node \"crc\" DevicePath \"\""
Nov 28 11:07:58 crc kubenswrapper[4838]: I1128 11:07:58.577976 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-7sfv7/crc-debug-d22c5"
Nov 28 11:07:58 crc kubenswrapper[4838]: I1128 11:07:58.592132 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="371702db-cfa9-469a-b1cb-17eb5aebdada" path="/var/lib/kubelet/pods/371702db-cfa9-469a-b1cb-17eb5aebdada/volumes"
Nov 28 11:07:58 crc kubenswrapper[4838]: I1128 11:07:58.593903 4838 scope.go:117] "RemoveContainer" containerID="ed01ad4e47ba17d0f8d35f962d89a1a163edadd076419c87c99db1c019e7fda9"
Nov 28 11:07:59 crc kubenswrapper[4838]: I1128 11:07:59.592656 4838 generic.go:334] "Generic (PLEG): container finished" podID="a453c56f-0a7e-413d-b0e0-38245cc7c304" containerID="e7051a0f9c6f3dc03505163744d93338129aeeacd6ee9188db6f044662a663dc" exitCode=0
Nov 28 11:07:59 crc kubenswrapper[4838]: I1128 11:07:59.592752 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qjjqb" event={"ID":"a453c56f-0a7e-413d-b0e0-38245cc7c304","Type":"ContainerDied","Data":"e7051a0f9c6f3dc03505163744d93338129aeeacd6ee9188db6f044662a663dc"}
Nov 28 11:08:00 crc kubenswrapper[4838]: I1128 11:08:00.604246 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qjjqb" event={"ID":"a453c56f-0a7e-413d-b0e0-38245cc7c304","Type":"ContainerStarted","Data":"addd708a56d1c7316d767dc85e4b131c1c4887b5b1a2966cfb78ac96c4b3432b"}
Nov 28 11:08:00 crc kubenswrapper[4838]: I1128 11:08:00.622590 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qjjqb" podStartSLOduration=2.077897631 podStartE2EDuration="4.62256664s" podCreationTimestamp="2025-11-28 11:07:56 +0000 UTC" firstStartedPulling="2025-11-28 11:07:57.572138557 +0000 UTC m=+4249.271112737" lastFinishedPulling="2025-11-28 11:08:00.116807536 +0000 UTC m=+4251.815781746" observedRunningTime="2025-11-28 11:08:00.618547371 +0000 UTC m=+4252.317521541" watchObservedRunningTime="2025-11-28 11:08:00.62256664 +0000 UTC m=+4252.321540830"
Nov 28 11:08:06 crc kubenswrapper[4838]: I1128 11:08:06.585261 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qjjqb"
Nov 28 11:08:06 crc kubenswrapper[4838]: I1128 11:08:06.585949 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qjjqb"
Nov 28 11:08:06 crc kubenswrapper[4838]: I1128 11:08:06.669821 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qjjqb"
Nov 28 11:08:06 crc kubenswrapper[4838]: I1128 11:08:06.760279 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qjjqb"
Nov 28 11:08:06 crc kubenswrapper[4838]: I1128 11:08:06.909924 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qjjqb"]
Nov 28 11:08:08 crc kubenswrapper[4838]: I1128 11:08:08.690770 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qjjqb" podUID="a453c56f-0a7e-413d-b0e0-38245cc7c304" containerName="registry-server" containerID="cri-o://addd708a56d1c7316d767dc85e4b131c1c4887b5b1a2966cfb78ac96c4b3432b" gracePeriod=2
Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.197455 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qjjqb" Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.363045 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4nhxz\" (UniqueName: \"kubernetes.io/projected/a453c56f-0a7e-413d-b0e0-38245cc7c304-kube-api-access-4nhxz\") pod \"a453c56f-0a7e-413d-b0e0-38245cc7c304\" (UID: \"a453c56f-0a7e-413d-b0e0-38245cc7c304\") " Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.363084 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a453c56f-0a7e-413d-b0e0-38245cc7c304-catalog-content\") pod \"a453c56f-0a7e-413d-b0e0-38245cc7c304\" (UID: \"a453c56f-0a7e-413d-b0e0-38245cc7c304\") " Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.363177 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a453c56f-0a7e-413d-b0e0-38245cc7c304-utilities\") pod \"a453c56f-0a7e-413d-b0e0-38245cc7c304\" (UID: \"a453c56f-0a7e-413d-b0e0-38245cc7c304\") " Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.364037 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a453c56f-0a7e-413d-b0e0-38245cc7c304-utilities" (OuterVolumeSpecName: "utilities") pod "a453c56f-0a7e-413d-b0e0-38245cc7c304" (UID: "a453c56f-0a7e-413d-b0e0-38245cc7c304"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.370936 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a453c56f-0a7e-413d-b0e0-38245cc7c304-kube-api-access-4nhxz" (OuterVolumeSpecName: "kube-api-access-4nhxz") pod "a453c56f-0a7e-413d-b0e0-38245cc7c304" (UID: "a453c56f-0a7e-413d-b0e0-38245cc7c304"). InnerVolumeSpecName "kube-api-access-4nhxz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.466161 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a453c56f-0a7e-413d-b0e0-38245cc7c304-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.466198 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4nhxz\" (UniqueName: \"kubernetes.io/projected/a453c56f-0a7e-413d-b0e0-38245cc7c304-kube-api-access-4nhxz\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.548949 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a453c56f-0a7e-413d-b0e0-38245cc7c304-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a453c56f-0a7e-413d-b0e0-38245cc7c304" (UID: "a453c56f-0a7e-413d-b0e0-38245cc7c304"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.568553 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a453c56f-0a7e-413d-b0e0-38245cc7c304-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.702607 4838 generic.go:334] "Generic (PLEG): container finished" podID="a453c56f-0a7e-413d-b0e0-38245cc7c304" containerID="addd708a56d1c7316d767dc85e4b131c1c4887b5b1a2966cfb78ac96c4b3432b" exitCode=0 Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.702676 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qjjqb" Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.702735 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qjjqb" event={"ID":"a453c56f-0a7e-413d-b0e0-38245cc7c304","Type":"ContainerDied","Data":"addd708a56d1c7316d767dc85e4b131c1c4887b5b1a2966cfb78ac96c4b3432b"} Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.703192 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qjjqb" event={"ID":"a453c56f-0a7e-413d-b0e0-38245cc7c304","Type":"ContainerDied","Data":"06e097ca5b0488f87102de7a8602a279b35e074056e06f4383d3b3541fcb61fc"} Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.703221 4838 scope.go:117] "RemoveContainer" containerID="addd708a56d1c7316d767dc85e4b131c1c4887b5b1a2966cfb78ac96c4b3432b" Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.723756 4838 scope.go:117] "RemoveContainer" containerID="e7051a0f9c6f3dc03505163744d93338129aeeacd6ee9188db6f044662a663dc" Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.744310 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qjjqb"] Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.752181 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qjjqb"] Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.788135 4838 scope.go:117] "RemoveContainer" containerID="a8ed339df790ffd0a251db6d6768f75891fd929a2c40812d3bb68cfc696a6dd8" Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.819006 4838 scope.go:117] "RemoveContainer" containerID="addd708a56d1c7316d767dc85e4b131c1c4887b5b1a2966cfb78ac96c4b3432b" Nov 28 11:08:09 crc kubenswrapper[4838]: E1128 11:08:09.819606 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"addd708a56d1c7316d767dc85e4b131c1c4887b5b1a2966cfb78ac96c4b3432b\": container with ID starting with addd708a56d1c7316d767dc85e4b131c1c4887b5b1a2966cfb78ac96c4b3432b not found: ID does not exist" containerID="addd708a56d1c7316d767dc85e4b131c1c4887b5b1a2966cfb78ac96c4b3432b" Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.819700 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"addd708a56d1c7316d767dc85e4b131c1c4887b5b1a2966cfb78ac96c4b3432b"} err="failed to get container status \"addd708a56d1c7316d767dc85e4b131c1c4887b5b1a2966cfb78ac96c4b3432b\": rpc error: code = NotFound desc = could not find container \"addd708a56d1c7316d767dc85e4b131c1c4887b5b1a2966cfb78ac96c4b3432b\": container with ID starting with addd708a56d1c7316d767dc85e4b131c1c4887b5b1a2966cfb78ac96c4b3432b not found: ID does not exist" Nov 28 
11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.819795 4838 scope.go:117] "RemoveContainer" containerID="e7051a0f9c6f3dc03505163744d93338129aeeacd6ee9188db6f044662a663dc" Nov 28 11:08:09 crc kubenswrapper[4838]: E1128 11:08:09.820392 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7051a0f9c6f3dc03505163744d93338129aeeacd6ee9188db6f044662a663dc\": container with ID starting with e7051a0f9c6f3dc03505163744d93338129aeeacd6ee9188db6f044662a663dc not found: ID does not exist" containerID="e7051a0f9c6f3dc03505163744d93338129aeeacd6ee9188db6f044662a663dc" Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.820436 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7051a0f9c6f3dc03505163744d93338129aeeacd6ee9188db6f044662a663dc"} err="failed to get container status \"e7051a0f9c6f3dc03505163744d93338129aeeacd6ee9188db6f044662a663dc\": rpc error: code = NotFound desc = could not find container \"e7051a0f9c6f3dc03505163744d93338129aeeacd6ee9188db6f044662a663dc\": container with ID starting with e7051a0f9c6f3dc03505163744d93338129aeeacd6ee9188db6f044662a663dc not found: ID does not exist" Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.820488 4838 scope.go:117] "RemoveContainer" containerID="a8ed339df790ffd0a251db6d6768f75891fd929a2c40812d3bb68cfc696a6dd8" Nov 28 11:08:09 crc kubenswrapper[4838]: E1128 11:08:09.820981 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8ed339df790ffd0a251db6d6768f75891fd929a2c40812d3bb68cfc696a6dd8\": container with ID starting with a8ed339df790ffd0a251db6d6768f75891fd929a2c40812d3bb68cfc696a6dd8 not found: ID does not exist" containerID="a8ed339df790ffd0a251db6d6768f75891fd929a2c40812d3bb68cfc696a6dd8" Nov 28 11:08:09 crc kubenswrapper[4838]: I1128 11:08:09.821027 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8ed339df790ffd0a251db6d6768f75891fd929a2c40812d3bb68cfc696a6dd8"} err="failed to get container status \"a8ed339df790ffd0a251db6d6768f75891fd929a2c40812d3bb68cfc696a6dd8\": rpc error: code = NotFound desc = could not find container \"a8ed339df790ffd0a251db6d6768f75891fd929a2c40812d3bb68cfc696a6dd8\": container with ID starting with a8ed339df790ffd0a251db6d6768f75891fd929a2c40812d3bb68cfc696a6dd8 not found: ID does not exist" Nov 28 11:08:10 crc kubenswrapper[4838]: I1128 11:08:10.574952 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a453c56f-0a7e-413d-b0e0-38245cc7c304" path="/var/lib/kubelet/pods/a453c56f-0a7e-413d-b0e0-38245cc7c304/volumes" Nov 28 11:08:25 crc kubenswrapper[4838]: I1128 11:08:25.446135 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6dfcbd5794-mx784_54957e0e-0d82-418e-9786-612dd3d121f0/barbican-api/0.log" Nov 28 11:08:25 crc kubenswrapper[4838]: I1128 11:08:25.618996 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6dfcbd5794-mx784_54957e0e-0d82-418e-9786-612dd3d121f0/barbican-api-log/0.log" Nov 28 11:08:25 crc kubenswrapper[4838]: I1128 11:08:25.659156 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-65687b7854-r7rh5_de6a5f4a-30c6-4f42-88e7-3f113c1ed53b/barbican-keystone-listener/0.log" Nov 28 11:08:25 crc kubenswrapper[4838]: I1128 11:08:25.873051 4838 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_barbican-worker-6669b75dd9-q6nlg_39e38efd-bd92-419d-90e8-f6630032e7d7/barbican-worker/0.log" Nov 28 11:08:25 crc kubenswrapper[4838]: I1128 11:08:25.962029 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-65687b7854-r7rh5_de6a5f4a-30c6-4f42-88e7-3f113c1ed53b/barbican-keystone-listener-log/0.log" Nov 28 11:08:25 crc kubenswrapper[4838]: I1128 11:08:25.999386 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6669b75dd9-q6nlg_39e38efd-bd92-419d-90e8-f6630032e7d7/barbican-worker-log/0.log" Nov 28 11:08:26 crc kubenswrapper[4838]: I1128 11:08:26.104668 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr_226a6e3a-8fcf-4284-b8a5-3f4055ae9838/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:08:26 crc kubenswrapper[4838]: I1128 11:08:26.208831 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_afc12091-3d32-4b69-8e6c-29d521764b7c/ceilometer-central-agent/0.log" Nov 28 11:08:26 crc kubenswrapper[4838]: I1128 11:08:26.848092 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_afc12091-3d32-4b69-8e6c-29d521764b7c/ceilometer-notification-agent/0.log" Nov 28 11:08:26 crc kubenswrapper[4838]: I1128 11:08:26.857380 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_afc12091-3d32-4b69-8e6c-29d521764b7c/sg-core/0.log" Nov 28 11:08:26 crc kubenswrapper[4838]: I1128 11:08:26.887207 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_afc12091-3d32-4b69-8e6c-29d521764b7c/proxy-httpd/0.log" Nov 28 11:08:27 crc kubenswrapper[4838]: I1128 11:08:27.048551 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj_882b8dc3-77a7-42d9-a380-c9e27ff2a3e0/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:08:27 crc kubenswrapper[4838]: I1128 11:08:27.058514 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp_6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a/ceph-client-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:08:27 crc kubenswrapper[4838]: I1128 11:08:27.290294 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_e94e5f12-61ab-40e8-97ce-dc6f3c706583/cinder-api/0.log" Nov 28 11:08:27 crc kubenswrapper[4838]: I1128 11:08:27.306258 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_e94e5f12-61ab-40e8-97ce-dc6f3c706583/cinder-api-log/0.log" Nov 28 11:08:27 crc kubenswrapper[4838]: I1128 11:08:27.515990 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_8525f0f2-643f-4177-a4f8-12ca22b43363/probe/0.log" Nov 28 11:08:27 crc kubenswrapper[4838]: I1128 11:08:27.656943 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_289f7c5f-5d1c-44fa-9231-281ed2d83e7a/cinder-scheduler/0.log" Nov 28 11:08:27 crc kubenswrapper[4838]: I1128 11:08:27.667401 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_8525f0f2-643f-4177-a4f8-12ca22b43363/cinder-backup/0.log" Nov 28 11:08:27 crc kubenswrapper[4838]: I1128 11:08:27.740611 4838 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_cinder-scheduler-0_289f7c5f-5d1c-44fa-9231-281ed2d83e7a/probe/0.log" Nov 28 11:08:27 crc kubenswrapper[4838]: I1128 11:08:27.853637 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_5efcb033-775b-46d6-8c77-2bafc360c749/cinder-volume/0.log" Nov 28 11:08:27 crc kubenswrapper[4838]: I1128 11:08:27.869595 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_5efcb033-775b-46d6-8c77-2bafc360c749/probe/0.log" Nov 28 11:08:27 crc kubenswrapper[4838]: I1128 11:08:27.971537 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-b8l59_c2c8e67b-9151-44fa-b8f3-f86621d4fd67/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:08:28 crc kubenswrapper[4838]: I1128 11:08:28.119431 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp_345f52bd-a4c3-4f71-bd23-9141bc780bfb/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:08:28 crc kubenswrapper[4838]: I1128 11:08:28.223258 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-76b5fdb995-4hbv2_05ad03f2-cfab-4825-9740-5c405550e376/init/0.log" Nov 28 11:08:28 crc kubenswrapper[4838]: I1128 11:08:28.429196 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-76b5fdb995-4hbv2_05ad03f2-cfab-4825-9740-5c405550e376/dnsmasq-dns/0.log" Nov 28 11:08:28 crc kubenswrapper[4838]: I1128 11:08:28.429918 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-76b5fdb995-4hbv2_05ad03f2-cfab-4825-9740-5c405550e376/init/0.log" Nov 28 11:08:28 crc kubenswrapper[4838]: I1128 11:08:28.444860 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_65afdfae-6cab-4f19-9c41-49b9409a7352/glance-log/0.log" Nov 28 11:08:28 crc kubenswrapper[4838]: I1128 11:08:28.447016 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_65afdfae-6cab-4f19-9c41-49b9409a7352/glance-httpd/0.log" Nov 28 11:08:28 crc kubenswrapper[4838]: I1128 11:08:28.626436 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_89b4bc38-621a-4f06-acb9-a59089d304c1/glance-httpd/0.log" Nov 28 11:08:28 crc kubenswrapper[4838]: I1128 11:08:28.682937 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_89b4bc38-621a-4f06-acb9-a59089d304c1/glance-log/0.log" Nov 28 11:08:28 crc kubenswrapper[4838]: I1128 11:08:28.749284 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-77d65cd94d-8f62l_97cbb2f0-d45e-4b75-ad50-becba9e4db9b/horizon/0.log" Nov 28 11:08:28 crc kubenswrapper[4838]: I1128 11:08:28.921838 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-bbz46_145f5aa8-896b-4b3c-846d-e896d932097d/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:08:28 crc kubenswrapper[4838]: I1128 11:08:28.925511 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-77d65cd94d-8f62l_97cbb2f0-d45e-4b75-ad50-becba9e4db9b/horizon-log/0.log" Nov 28 11:08:28 crc kubenswrapper[4838]: I1128 11:08:28.942745 4838 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-ht9vn_02540331-1ba6-45ee-824c-52e9b076f511/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:08:29 crc kubenswrapper[4838]: I1128 11:08:29.298883 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29405461-fclpk_b9119237-dcca-4d01-b6e2-6deddc18f8f6/keystone-cron/0.log" Nov 28 11:08:29 crc kubenswrapper[4838]: I1128 11:08:29.502830 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_9d3e1aba-11d2-478c-9715-49ba175c7b03/kube-state-metrics/0.log" Nov 28 11:08:29 crc kubenswrapper[4838]: I1128 11:08:29.566192 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-8958p_d861b650-a017-43fc-8da3-b65d8f9e8ce8/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:08:30 crc kubenswrapper[4838]: I1128 11:08:30.003902 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_9b49fbfc-a8f5-48aa-bb7b-96d82967eecb/probe/0.log" Nov 28 11:08:30 crc kubenswrapper[4838]: I1128 11:08:30.146296 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_a16e601a-3619-49a9-82d1-67129c2e2413/manila-api/0.log" Nov 28 11:08:30 crc kubenswrapper[4838]: I1128 11:08:30.147745 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_9b49fbfc-a8f5-48aa-bb7b-96d82967eecb/manila-scheduler/0.log" Nov 28 11:08:30 crc kubenswrapper[4838]: I1128 11:08:30.377916 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_4697ec9a-896b-4703-87c0-84a7741b8724/probe/0.log" Nov 28 11:08:30 crc kubenswrapper[4838]: I1128 11:08:30.416676 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-698bf66db7-q4nv6_4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99/keystone-api/0.log" Nov 28 11:08:30 crc kubenswrapper[4838]: I1128 11:08:30.698390 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_4697ec9a-896b-4703-87c0-84a7741b8724/manila-share/0.log" Nov 28 11:08:30 crc kubenswrapper[4838]: I1128 11:08:30.769085 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_a16e601a-3619-49a9-82d1-67129c2e2413/manila-api-log/0.log" Nov 28 11:08:30 crc kubenswrapper[4838]: I1128 11:08:30.827039 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-69bc8cb85-2qbr6_5f9ddfd3-3f45-40e8-a9f8-2976dd20280f/neutron-api/0.log" Nov 28 11:08:30 crc kubenswrapper[4838]: I1128 11:08:30.906628 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-69bc8cb85-2qbr6_5f9ddfd3-3f45-40e8-a9f8-2976dd20280f/neutron-httpd/0.log" Nov 28 11:08:30 crc kubenswrapper[4838]: I1128 11:08:30.993536 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj_777a7bbd-ba32-4b20-a263-de82be50d3b1/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:08:31 crc kubenswrapper[4838]: I1128 11:08:31.261701 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_3305f5c4-7a09-439f-bf0f-534b3dea0b05/nova-api-log/0.log" Nov 28 11:08:31 crc kubenswrapper[4838]: I1128 11:08:31.401205 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_71625902-2a8a-4e8b-beb2-faaee7714ed2/nova-cell0-conductor-conductor/0.log" Nov 28 
11:08:31 crc kubenswrapper[4838]: I1128 11:08:31.486082 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_3305f5c4-7a09-439f-bf0f-534b3dea0b05/nova-api-api/0.log" Nov 28 11:08:31 crc kubenswrapper[4838]: I1128 11:08:31.531709 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_88e49157-0dd0-455d-a9bd-5a13c3d95087/nova-cell1-conductor-conductor/0.log" Nov 28 11:08:31 crc kubenswrapper[4838]: I1128 11:08:31.805295 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_71d3fada-848c-4e73-ad9e-f63e8fdde48e/nova-cell1-novncproxy-novncproxy/0.log" Nov 28 11:08:31 crc kubenswrapper[4838]: I1128 11:08:31.812115 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x_54e4c0ee-74da-434c-bb61-702d4e78c663/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:08:32 crc kubenswrapper[4838]: I1128 11:08:32.098200 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_a524de62-c36c-4abf-9a45-57247679c4e7/nova-metadata-log/0.log" Nov 28 11:08:32 crc kubenswrapper[4838]: I1128 11:08:32.172163 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_3a05bed3-8b30-4959-9ee2-4b25a928b0e5/nova-scheduler-scheduler/0.log" Nov 28 11:08:32 crc kubenswrapper[4838]: I1128 11:08:32.298538 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_061baebe-5a1a-4090-a396-84571f88b105/mysql-bootstrap/0.log" Nov 28 11:08:32 crc kubenswrapper[4838]: I1128 11:08:32.500261 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_061baebe-5a1a-4090-a396-84571f88b105/galera/0.log" Nov 28 11:08:32 crc kubenswrapper[4838]: I1128 11:08:32.504897 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_061baebe-5a1a-4090-a396-84571f88b105/mysql-bootstrap/0.log" Nov 28 11:08:32 crc kubenswrapper[4838]: I1128 11:08:32.754215 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_55cfa883-d16d-4231-95e7-fd0b3ad9b702/mysql-bootstrap/0.log" Nov 28 11:08:32 crc kubenswrapper[4838]: I1128 11:08:32.921951 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_55cfa883-d16d-4231-95e7-fd0b3ad9b702/mysql-bootstrap/0.log" Nov 28 11:08:33 crc kubenswrapper[4838]: I1128 11:08:33.028603 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_55cfa883-d16d-4231-95e7-fd0b3ad9b702/galera/0.log" Nov 28 11:08:33 crc kubenswrapper[4838]: I1128 11:08:33.160477 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_b87ad3f7-9446-43b7-9141-2279794386a0/openstackclient/0.log" Nov 28 11:08:33 crc kubenswrapper[4838]: I1128 11:08:33.231556 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-dqjd8_50cdff0a-cfe5-41e1-8eed-67b23079335f/ovn-controller/0.log" Nov 28 11:08:33 crc kubenswrapper[4838]: I1128 11:08:33.379560 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_a524de62-c36c-4abf-9a45-57247679c4e7/nova-metadata-metadata/0.log" Nov 28 11:08:33 crc kubenswrapper[4838]: I1128 11:08:33.473706 4838 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovn-controller-metrics-b7fh7_77ac18d8-c660-4742-8367-281a06a82e37/openstack-network-exporter/0.log" Nov 28 11:08:33 crc kubenswrapper[4838]: I1128 11:08:33.817441 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-zt4fz_4cb6513a-e07a-40b9-a3ad-f147b8b4a96d/ovsdb-server-init/0.log" Nov 28 11:08:33 crc kubenswrapper[4838]: I1128 11:08:33.932551 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-zt4fz_4cb6513a-e07a-40b9-a3ad-f147b8b4a96d/ovsdb-server-init/0.log" Nov 28 11:08:33 crc kubenswrapper[4838]: I1128 11:08:33.940289 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-zt4fz_4cb6513a-e07a-40b9-a3ad-f147b8b4a96d/ovs-vswitchd/0.log" Nov 28 11:08:33 crc kubenswrapper[4838]: I1128 11:08:33.948169 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-zt4fz_4cb6513a-e07a-40b9-a3ad-f147b8b4a96d/ovsdb-server/0.log" Nov 28 11:08:34 crc kubenswrapper[4838]: I1128 11:08:34.129953 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_d62996a9-1816-49c7-9280-f115770a83ad/openstack-network-exporter/0.log" Nov 28 11:08:34 crc kubenswrapper[4838]: I1128 11:08:34.212634 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-49cxw_e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:08:34 crc kubenswrapper[4838]: I1128 11:08:34.289570 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_d62996a9-1816-49c7-9280-f115770a83ad/ovn-northd/0.log" Nov 28 11:08:34 crc kubenswrapper[4838]: I1128 11:08:34.399082 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_20476ab2-8070-42b3-a05c-d2c07c111ea9/openstack-network-exporter/0.log" Nov 28 11:08:34 crc kubenswrapper[4838]: I1128 11:08:34.892415 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_20476ab2-8070-42b3-a05c-d2c07c111ea9/ovsdbserver-nb/0.log" Nov 28 11:08:34 crc kubenswrapper[4838]: I1128 11:08:34.955859 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_c2037801-5d52-402d-9d8c-4b17928fb33a/ovsdbserver-sb/0.log" Nov 28 11:08:34 crc kubenswrapper[4838]: I1128 11:08:34.988198 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_c2037801-5d52-402d-9d8c-4b17928fb33a/openstack-network-exporter/0.log" Nov 28 11:08:35 crc kubenswrapper[4838]: I1128 11:08:35.226976 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-7f5f5f8b64-f2wff_f4ad309b-1078-40e9-abd7-d1b476971fce/placement-api/0.log" Nov 28 11:08:35 crc kubenswrapper[4838]: I1128 11:08:35.239936 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-7f5f5f8b64-f2wff_f4ad309b-1078-40e9-abd7-d1b476971fce/placement-log/0.log" Nov 28 11:08:35 crc kubenswrapper[4838]: I1128 11:08:35.358057 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_091769fb-bf67-454a-b0da-3e33589799f9/setup-container/0.log" Nov 28 11:08:35 crc kubenswrapper[4838]: I1128 11:08:35.539927 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_091769fb-bf67-454a-b0da-3e33589799f9/setup-container/0.log" Nov 28 11:08:35 crc kubenswrapper[4838]: I1128 11:08:35.595332 4838 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_091769fb-bf67-454a-b0da-3e33589799f9/rabbitmq/0.log" Nov 28 11:08:35 crc kubenswrapper[4838]: I1128 11:08:35.621585 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_969e66ab-e24e-4a63-9543-8214980ccbe3/setup-container/0.log" Nov 28 11:08:35 crc kubenswrapper[4838]: I1128 11:08:35.838427 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_969e66ab-e24e-4a63-9543-8214980ccbe3/setup-container/0.log" Nov 28 11:08:35 crc kubenswrapper[4838]: I1128 11:08:35.890869 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c_801ebab7-c9de-423e-910d-32e56be5cc7b/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:08:35 crc kubenswrapper[4838]: I1128 11:08:35.900376 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_969e66ab-e24e-4a63-9543-8214980ccbe3/rabbitmq/0.log" Nov 28 11:08:36 crc kubenswrapper[4838]: I1128 11:08:36.055608 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6_277c54af-c091-426d-bf0d-523eca9b41fb/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:08:36 crc kubenswrapper[4838]: I1128 11:08:36.671553 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-f8h25_ea469a90-76a4-4712-90a7-af038c331ee2/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:08:36 crc kubenswrapper[4838]: I1128 11:08:36.672971 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-zfmqh_fcf22c85-ed90-47d9-9131-365fa3620686/ssh-known-hosts-edpm-deployment/0.log" Nov 28 11:08:36 crc kubenswrapper[4838]: I1128 11:08:36.956083 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_9d260724-3275-4670-966b-0c961bf54da5/test-operator-logs-container/0.log" Nov 28 11:08:36 crc kubenswrapper[4838]: I1128 11:08:36.963741 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_7899bfa9-2025-457b-9c46-194188b7f52e/tempest-tests-tempest-tests-runner/0.log" Nov 28 11:08:37 crc kubenswrapper[4838]: I1128 11:08:37.166289 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc_d1fe48d6-01d5-4805-8359-921c9b8888a4/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 28 11:08:50 crc kubenswrapper[4838]: I1128 11:08:50.682789 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_09948818-c683-4cab-ab8e-c4bfa54809a1/memcached/0.log" Nov 28 11:09:07 crc kubenswrapper[4838]: I1128 11:09:07.578351 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s_b2da9f3e-de9a-430d-887e-6b75fa6133d1/util/0.log" Nov 28 11:09:07 crc kubenswrapper[4838]: I1128 11:09:07.801879 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s_b2da9f3e-de9a-430d-887e-6b75fa6133d1/pull/0.log" Nov 28 11:09:07 crc kubenswrapper[4838]: I1128 11:09:07.811346 4838 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s_b2da9f3e-de9a-430d-887e-6b75fa6133d1/pull/0.log" Nov 28 11:09:07 crc kubenswrapper[4838]: I1128 11:09:07.815488 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s_b2da9f3e-de9a-430d-887e-6b75fa6133d1/util/0.log" Nov 28 11:09:07 crc kubenswrapper[4838]: I1128 11:09:07.976998 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s_b2da9f3e-de9a-430d-887e-6b75fa6133d1/util/0.log" Nov 28 11:09:07 crc kubenswrapper[4838]: I1128 11:09:07.983199 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s_b2da9f3e-de9a-430d-887e-6b75fa6133d1/extract/0.log" Nov 28 11:09:08 crc kubenswrapper[4838]: I1128 11:09:08.036122 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s_b2da9f3e-de9a-430d-887e-6b75fa6133d1/pull/0.log" Nov 28 11:09:08 crc kubenswrapper[4838]: I1128 11:09:08.164752 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-zsv5q_2d42b4ea-468b-482a-8d06-57d2cd7d40f0/kube-rbac-proxy/0.log" Nov 28 11:09:08 crc kubenswrapper[4838]: I1128 11:09:08.220950 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-zsv5q_2d42b4ea-468b-482a-8d06-57d2cd7d40f0/manager/0.log" Nov 28 11:09:08 crc kubenswrapper[4838]: I1128 11:09:08.280359 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-z8m7s_458f4354-42e8-46d1-a571-0a0d1a852574/kube-rbac-proxy/0.log" Nov 28 11:09:08 crc kubenswrapper[4838]: I1128 11:09:08.433759 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-z8m7s_458f4354-42e8-46d1-a571-0a0d1a852574/manager/0.log" Nov 28 11:09:08 crc kubenswrapper[4838]: I1128 11:09:08.513343 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-jtm69_aceca209-5955-4644-a139-2dfc5d36bb48/kube-rbac-proxy/0.log" Nov 28 11:09:08 crc kubenswrapper[4838]: I1128 11:09:08.584501 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-jtm69_aceca209-5955-4644-a139-2dfc5d36bb48/manager/0.log" Nov 28 11:09:08 crc kubenswrapper[4838]: I1128 11:09:08.688971 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-sgt4v_20706334-3560-47c0-beee-0eacda6e2eeb/kube-rbac-proxy/0.log" Nov 28 11:09:08 crc kubenswrapper[4838]: I1128 11:09:08.766815 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-sgt4v_20706334-3560-47c0-beee-0eacda6e2eeb/manager/0.log" Nov 28 11:09:08 crc kubenswrapper[4838]: I1128 11:09:08.898204 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-mfvxw_bfc8796f-9498-4707-ae79-225de0c3d39f/manager/0.log" Nov 28 11:09:08 crc kubenswrapper[4838]: I1128 11:09:08.899571 
4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-mfvxw_bfc8796f-9498-4707-ae79-225de0c3d39f/kube-rbac-proxy/0.log" Nov 28 11:09:08 crc kubenswrapper[4838]: I1128 11:09:08.975518 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-99pnl_f79a2f03-43b3-47d0-89f8-55374a730a22/kube-rbac-proxy/0.log" Nov 28 11:09:09 crc kubenswrapper[4838]: I1128 11:09:09.079985 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-99pnl_f79a2f03-43b3-47d0-89f8-55374a730a22/manager/0.log" Nov 28 11:09:09 crc kubenswrapper[4838]: I1128 11:09:09.129316 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-7qwxs_64a7b90b-6294-429b-b7f8-7820d9a5514e/kube-rbac-proxy/0.log" Nov 28 11:09:09 crc kubenswrapper[4838]: I1128 11:09:09.394214 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-dldwc_00ae7528-ac6c-4ceb-9e8f-80e588aced3d/kube-rbac-proxy/0.log" Nov 28 11:09:09 crc kubenswrapper[4838]: I1128 11:09:09.536146 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-7qwxs_64a7b90b-6294-429b-b7f8-7820d9a5514e/manager/0.log" Nov 28 11:09:09 crc kubenswrapper[4838]: I1128 11:09:09.575839 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-dldwc_00ae7528-ac6c-4ceb-9e8f-80e588aced3d/manager/0.log" Nov 28 11:09:09 crc kubenswrapper[4838]: I1128 11:09:09.742125 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-qt6hp_f9825746-5143-4716-9458-aad44231b721/kube-rbac-proxy/0.log" Nov 28 11:09:09 crc kubenswrapper[4838]: I1128 11:09:09.791995 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-qt6hp_f9825746-5143-4716-9458-aad44231b721/manager/0.log" Nov 28 11:09:09 crc kubenswrapper[4838]: I1128 11:09:09.814527 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-67764766d7-5dcgh_d8596d98-979f-4b13-bef4-ccaabbcf155e/kube-rbac-proxy/0.log" Nov 28 11:09:09 crc kubenswrapper[4838]: I1128 11:09:09.954401 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-lppm4_aef566ed-5e8f-4ce9-9fa4-75bfef26a65e/kube-rbac-proxy/0.log" Nov 28 11:09:09 crc kubenswrapper[4838]: I1128 11:09:09.970598 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-67764766d7-5dcgh_d8596d98-979f-4b13-bef4-ccaabbcf155e/manager/0.log" Nov 28 11:09:10 crc kubenswrapper[4838]: I1128 11:09:10.022952 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-lppm4_aef566ed-5e8f-4ce9-9fa4-75bfef26a65e/manager/0.log" Nov 28 11:09:10 crc kubenswrapper[4838]: I1128 11:09:10.143971 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-nw7wx_03bb7fb2-31ae-4e18-b77d-e6dad8007460/kube-rbac-proxy/0.log" Nov 28 11:09:10 crc 
kubenswrapper[4838]: I1128 11:09:10.273151 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-nw7wx_03bb7fb2-31ae-4e18-b77d-e6dad8007460/manager/0.log" Nov 28 11:09:10 crc kubenswrapper[4838]: I1128 11:09:10.325083 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-zrl6r_90e6e6d2-fd36-40e9-9002-d3a5e4c53f4e/kube-rbac-proxy/0.log" Nov 28 11:09:10 crc kubenswrapper[4838]: I1128 11:09:10.468437 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-zrl6r_90e6e6d2-fd36-40e9-9002-d3a5e4c53f4e/manager/0.log" Nov 28 11:09:10 crc kubenswrapper[4838]: I1128 11:09:10.504592 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-tjbnw_40466f6c-03c1-4aa4-9d03-947168f4068c/kube-rbac-proxy/0.log" Nov 28 11:09:10 crc kubenswrapper[4838]: I1128 11:09:10.596370 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-tjbnw_40466f6c-03c1-4aa4-9d03-947168f4068c/manager/0.log" Nov 28 11:09:10 crc kubenswrapper[4838]: I1128 11:09:10.733740 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6_dd6ab766-6c66-4d3e-8089-9fe2faf6a28a/kube-rbac-proxy/0.log" Nov 28 11:09:10 crc kubenswrapper[4838]: I1128 11:09:10.759854 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6_dd6ab766-6c66-4d3e-8089-9fe2faf6a28a/manager/0.log" Nov 28 11:09:11 crc kubenswrapper[4838]: I1128 11:09:11.072623 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-8j4q4_c89c4ded-08e4-4d58-bb51-0f0e997b76a6/registry-server/0.log" Nov 28 11:09:11 crc kubenswrapper[4838]: I1128 11:09:11.116596 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5d66f99678-q8k4k_50baf569-8340-4264-8e08-28049728c9ad/operator/0.log" Nov 28 11:09:11 crc kubenswrapper[4838]: I1128 11:09:11.182199 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-jk9v2_eb245c1e-92f6-486e-be63-0093a22ed7b0/kube-rbac-proxy/0.log" Nov 28 11:09:11 crc kubenswrapper[4838]: I1128 11:09:11.290435 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-jk9v2_eb245c1e-92f6-486e-be63-0093a22ed7b0/manager/0.log" Nov 28 11:09:11 crc kubenswrapper[4838]: I1128 11:09:11.353171 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-t8tj5_f74a2c89-ae8d-428e-b8b2-d2d58e943f8e/kube-rbac-proxy/0.log" Nov 28 11:09:11 crc kubenswrapper[4838]: I1128 11:09:11.442037 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-t8tj5_f74a2c89-ae8d-428e-b8b2-d2d58e943f8e/manager/0.log" Nov 28 11:09:11 crc kubenswrapper[4838]: I1128 11:09:11.601960 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-xj7cz_6979c02b-5bc1-4eec-aa05-086f449ffd93/operator/0.log" Nov 
28 11:09:11 crc kubenswrapper[4838]: I1128 11:09:11.621954 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-2mxtr_fb9e8fa1-8798-424a-a435-daae465a8e79/kube-rbac-proxy/0.log" Nov 28 11:09:11 crc kubenswrapper[4838]: I1128 11:09:11.835039 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-2xswp_5f3be357-f971-4dd1-bb7e-82098aaad7b4/kube-rbac-proxy/0.log" Nov 28 11:09:11 crc kubenswrapper[4838]: I1128 11:09:11.856609 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-2mxtr_fb9e8fa1-8798-424a-a435-daae465a8e79/manager/0.log" Nov 28 11:09:12 crc kubenswrapper[4838]: I1128 11:09:12.009973 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-2xswp_5f3be357-f971-4dd1-bb7e-82098aaad7b4/manager/0.log" Nov 28 11:09:12 crc kubenswrapper[4838]: I1128 11:09:12.034369 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-jrtsw_7b19f93b-ae7d-4e10-acce-53f0c65bbce0/manager/0.log" Nov 28 11:09:12 crc kubenswrapper[4838]: I1128 11:09:12.046841 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-jrtsw_7b19f93b-ae7d-4e10-acce-53f0c65bbce0/kube-rbac-proxy/0.log" Nov 28 11:09:12 crc kubenswrapper[4838]: I1128 11:09:12.120119 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-8599fd59b5-m2r97_397a8728-e2ae-4db0-8446-9044007df4e1/manager/0.log" Nov 28 11:09:12 crc kubenswrapper[4838]: I1128 11:09:12.205633 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-pqwfn_431c4d08-781a-4925-96cd-153997f72239/kube-rbac-proxy/0.log" Nov 28 11:09:12 crc kubenswrapper[4838]: I1128 11:09:12.222171 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-pqwfn_431c4d08-781a-4925-96cd-153997f72239/manager/0.log" Nov 28 11:09:23 crc kubenswrapper[4838]: I1128 11:09:23.939593 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:09:23 crc kubenswrapper[4838]: I1128 11:09:23.940253 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:09:32 crc kubenswrapper[4838]: I1128 11:09:32.840130 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-vrzwk_502acc2d-a5e3-4240-b2fb-7f67b7518b82/control-plane-machine-set-operator/0.log" Nov 28 11:09:33 crc kubenswrapper[4838]: I1128 11:09:33.000905 4838 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-bs85m_fb0f7dc9-74c6-4031-8edb-7b10c219df34/machine-api-operator/0.log" Nov 28 11:09:33 crc kubenswrapper[4838]: I1128 11:09:33.022048 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-bs85m_fb0f7dc9-74c6-4031-8edb-7b10c219df34/kube-rbac-proxy/0.log" Nov 28 11:09:48 crc kubenswrapper[4838]: I1128 11:09:48.945909 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-gcqpn_e850d813-cc68-49bd-aa4d-ab3271b36d41/cert-manager-controller/0.log" Nov 28 11:09:49 crc kubenswrapper[4838]: I1128 11:09:49.064584 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-pc7lt_34c85aea-53ac-4f8b-b4b7-a5262768ea9a/cert-manager-cainjector/0.log" Nov 28 11:09:49 crc kubenswrapper[4838]: I1128 11:09:49.096310 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-zldj2_863773f7-97a9-4bcc-8c5d-86b5533f1c6b/cert-manager-webhook/0.log" Nov 28 11:09:53 crc kubenswrapper[4838]: I1128 11:09:53.940113 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:09:53 crc kubenswrapper[4838]: I1128 11:09:53.940709 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:10:03 crc kubenswrapper[4838]: I1128 11:10:03.909361 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7fbb5f6569-qvrkm_2205c062-150d-43c9-8e91-9187c92a1908/nmstate-console-plugin/0.log" Nov 28 11:10:04 crc kubenswrapper[4838]: I1128 11:10:04.071224 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-sgkvp_7d076e50-6cc5-4258-b334-faa9d4f1a3b4/nmstate-handler/0.log" Nov 28 11:10:04 crc kubenswrapper[4838]: I1128 11:10:04.148159 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-hrdrc_1190df35-2195-49c1-abb5-1a5e11626ec4/nmstate-metrics/0.log" Nov 28 11:10:04 crc kubenswrapper[4838]: I1128 11:10:04.172527 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-hrdrc_1190df35-2195-49c1-abb5-1a5e11626ec4/kube-rbac-proxy/0.log" Nov 28 11:10:04 crc kubenswrapper[4838]: I1128 11:10:04.261455 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5b5b58f5c8-9n28j_8726f2de-449f-4a3d-ae20-cf2e1f14abe2/nmstate-operator/0.log" Nov 28 11:10:04 crc kubenswrapper[4838]: I1128 11:10:04.362743 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-5f6d4c5ccb-pf4kk_27f91860-f5ee-4232-b298-bf97137a1d12/nmstate-webhook/0.log" Nov 28 11:10:20 crc kubenswrapper[4838]: I1128 11:10:20.885165 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-p4xws_ef168536-335c-417d-b9d5-7dc2affb0b62/kube-rbac-proxy/0.log" Nov 28 11:10:21 crc 
kubenswrapper[4838]: I1128 11:10:21.002577 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-p4xws_ef168536-335c-417d-b9d5-7dc2affb0b62/controller/0.log" Nov 28 11:10:21 crc kubenswrapper[4838]: I1128 11:10:21.077885 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-frr-files/0.log" Nov 28 11:10:21 crc kubenswrapper[4838]: I1128 11:10:21.187307 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-frr-files/0.log" Nov 28 11:10:21 crc kubenswrapper[4838]: I1128 11:10:21.207697 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-reloader/0.log" Nov 28 11:10:21 crc kubenswrapper[4838]: I1128 11:10:21.221506 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-metrics/0.log" Nov 28 11:10:21 crc kubenswrapper[4838]: I1128 11:10:21.285288 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-reloader/0.log" Nov 28 11:10:21 crc kubenswrapper[4838]: I1128 11:10:21.463781 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-reloader/0.log" Nov 28 11:10:21 crc kubenswrapper[4838]: I1128 11:10:21.485698 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-metrics/0.log" Nov 28 11:10:21 crc kubenswrapper[4838]: I1128 11:10:21.488328 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-frr-files/0.log" Nov 28 11:10:21 crc kubenswrapper[4838]: I1128 11:10:21.523793 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-metrics/0.log" Nov 28 11:10:21 crc kubenswrapper[4838]: I1128 11:10:21.661071 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-frr-files/0.log" Nov 28 11:10:21 crc kubenswrapper[4838]: I1128 11:10:21.688497 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-reloader/0.log" Nov 28 11:10:21 crc kubenswrapper[4838]: I1128 11:10:21.716249 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/controller/0.log" Nov 28 11:10:21 crc kubenswrapper[4838]: I1128 11:10:21.719589 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-metrics/0.log" Nov 28 11:10:21 crc kubenswrapper[4838]: I1128 11:10:21.898000 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/frr-metrics/0.log" Nov 28 11:10:21 crc kubenswrapper[4838]: I1128 11:10:21.921445 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/kube-rbac-proxy-frr/0.log" Nov 28 11:10:21 crc kubenswrapper[4838]: I1128 11:10:21.929334 4838 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/kube-rbac-proxy/0.log" Nov 28 11:10:22 crc kubenswrapper[4838]: I1128 11:10:22.171268 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/reloader/0.log" Nov 28 11:10:22 crc kubenswrapper[4838]: I1128 11:10:22.181036 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7fcb986d4-8frbg_8ef825e1-e9dc-4a29-94cd-722613098926/frr-k8s-webhook-server/0.log" Nov 28 11:10:22 crc kubenswrapper[4838]: I1128 11:10:22.386752 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-84ddbcdd65-dqrq8_3eb34f58-6fc9-4220-87a6-74090d0f1874/manager/0.log" Nov 28 11:10:22 crc kubenswrapper[4838]: I1128 11:10:22.606496 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-5bf7d9b684-tdkmz_4588ca95-c23d-4709-8160-ab77a17f858d/webhook-server/0.log" Nov 28 11:10:22 crc kubenswrapper[4838]: I1128 11:10:22.621410 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-xrbxd_7f764b03-48a1-45af-b406-40c60c1e912c/kube-rbac-proxy/0.log" Nov 28 11:10:23 crc kubenswrapper[4838]: I1128 11:10:23.198757 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-xrbxd_7f764b03-48a1-45af-b406-40c60c1e912c/speaker/0.log" Nov 28 11:10:23 crc kubenswrapper[4838]: I1128 11:10:23.367544 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/frr/0.log" Nov 28 11:10:23 crc kubenswrapper[4838]: I1128 11:10:23.939870 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:10:23 crc kubenswrapper[4838]: I1128 11:10:23.939932 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:10:23 crc kubenswrapper[4838]: I1128 11:10:23.939978 4838 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 11:10:23 crc kubenswrapper[4838]: I1128 11:10:23.940707 4838 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6deb95eccfb366ee3b0f8143f9aea63dc132d0fd50914fed82f0caf61f74a268"} pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 11:10:23 crc kubenswrapper[4838]: I1128 11:10:23.940777 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" containerID="cri-o://6deb95eccfb366ee3b0f8143f9aea63dc132d0fd50914fed82f0caf61f74a268" gracePeriod=600 Nov 28 11:10:24 crc kubenswrapper[4838]: I1128 11:10:24.583860 4838 
generic.go:334] "Generic (PLEG): container finished" podID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerID="6deb95eccfb366ee3b0f8143f9aea63dc132d0fd50914fed82f0caf61f74a268" exitCode=0 Nov 28 11:10:24 crc kubenswrapper[4838]: I1128 11:10:24.583947 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerDied","Data":"6deb95eccfb366ee3b0f8143f9aea63dc132d0fd50914fed82f0caf61f74a268"} Nov 28 11:10:24 crc kubenswrapper[4838]: I1128 11:10:24.584366 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerStarted","Data":"b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e"} Nov 28 11:10:24 crc kubenswrapper[4838]: I1128 11:10:24.584405 4838 scope.go:117] "RemoveContainer" containerID="616c42b063d53e2f1fdae5c99959bb3a7ba5c7c89c51bc64c2a6b637b60b6dbb" Nov 28 11:10:39 crc kubenswrapper[4838]: I1128 11:10:39.446838 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql_607ae4a0-90c8-48ff-afa9-21eb5b545fce/util/0.log" Nov 28 11:10:39 crc kubenswrapper[4838]: I1128 11:10:39.682516 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql_607ae4a0-90c8-48ff-afa9-21eb5b545fce/pull/0.log" Nov 28 11:10:39 crc kubenswrapper[4838]: I1128 11:10:39.707743 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql_607ae4a0-90c8-48ff-afa9-21eb5b545fce/util/0.log" Nov 28 11:10:39 crc kubenswrapper[4838]: I1128 11:10:39.708580 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql_607ae4a0-90c8-48ff-afa9-21eb5b545fce/pull/0.log" Nov 28 11:10:39 crc kubenswrapper[4838]: I1128 11:10:39.877194 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql_607ae4a0-90c8-48ff-afa9-21eb5b545fce/util/0.log" Nov 28 11:10:39 crc kubenswrapper[4838]: I1128 11:10:39.879143 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql_607ae4a0-90c8-48ff-afa9-21eb5b545fce/pull/0.log" Nov 28 11:10:39 crc kubenswrapper[4838]: I1128 11:10:39.918244 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql_607ae4a0-90c8-48ff-afa9-21eb5b545fce/extract/0.log" Nov 28 11:10:40 crc kubenswrapper[4838]: I1128 11:10:40.042985 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7_29970a13-5b66-4cf6-8515-3b4bd570dd2f/util/0.log" Nov 28 11:10:40 crc kubenswrapper[4838]: I1128 11:10:40.227856 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7_29970a13-5b66-4cf6-8515-3b4bd570dd2f/pull/0.log" Nov 28 11:10:40 crc kubenswrapper[4838]: I1128 11:10:40.242421 4838 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7_29970a13-5b66-4cf6-8515-3b4bd570dd2f/util/0.log" Nov 28 11:10:40 crc kubenswrapper[4838]: I1128 11:10:40.247233 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7_29970a13-5b66-4cf6-8515-3b4bd570dd2f/pull/0.log" Nov 28 11:10:40 crc kubenswrapper[4838]: I1128 11:10:40.798040 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7_29970a13-5b66-4cf6-8515-3b4bd570dd2f/pull/0.log" Nov 28 11:10:40 crc kubenswrapper[4838]: I1128 11:10:40.851971 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7_29970a13-5b66-4cf6-8515-3b4bd570dd2f/extract/0.log" Nov 28 11:10:40 crc kubenswrapper[4838]: I1128 11:10:40.859632 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7_29970a13-5b66-4cf6-8515-3b4bd570dd2f/util/0.log" Nov 28 11:10:41 crc kubenswrapper[4838]: I1128 11:10:41.002906 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j64zj_c38d1237-07ae-448a-9a53-5432a944fd83/extract-utilities/0.log" Nov 28 11:10:41 crc kubenswrapper[4838]: I1128 11:10:41.143930 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j64zj_c38d1237-07ae-448a-9a53-5432a944fd83/extract-utilities/0.log" Nov 28 11:10:41 crc kubenswrapper[4838]: I1128 11:10:41.227864 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j64zj_c38d1237-07ae-448a-9a53-5432a944fd83/extract-content/0.log" Nov 28 11:10:41 crc kubenswrapper[4838]: I1128 11:10:41.378632 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j64zj_c38d1237-07ae-448a-9a53-5432a944fd83/extract-utilities/0.log" Nov 28 11:10:41 crc kubenswrapper[4838]: I1128 11:10:41.399526 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j64zj_c38d1237-07ae-448a-9a53-5432a944fd83/extract-content/0.log" Nov 28 11:10:41 crc kubenswrapper[4838]: I1128 11:10:41.482700 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j64zj_c38d1237-07ae-448a-9a53-5432a944fd83/extract-content/0.log" Nov 28 11:10:41 crc kubenswrapper[4838]: I1128 11:10:41.747043 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-n4cg9_9afd2484-54f9-4dd9-b081-6537c075864f/extract-utilities/0.log" Nov 28 11:10:41 crc kubenswrapper[4838]: I1128 11:10:41.874058 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-n4cg9_9afd2484-54f9-4dd9-b081-6537c075864f/extract-utilities/0.log" Nov 28 11:10:41 crc kubenswrapper[4838]: I1128 11:10:41.904987 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-n4cg9_9afd2484-54f9-4dd9-b081-6537c075864f/extract-content/0.log" Nov 28 11:10:41 crc kubenswrapper[4838]: I1128 11:10:41.971960 4838 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-n4cg9_9afd2484-54f9-4dd9-b081-6537c075864f/extract-content/0.log" Nov 28 11:10:42 crc kubenswrapper[4838]: I1128 11:10:42.110256 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j64zj_c38d1237-07ae-448a-9a53-5432a944fd83/registry-server/0.log" Nov 28 11:10:42 crc kubenswrapper[4838]: I1128 11:10:42.146513 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-n4cg9_9afd2484-54f9-4dd9-b081-6537c075864f/extract-utilities/0.log" Nov 28 11:10:42 crc kubenswrapper[4838]: I1128 11:10:42.178441 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-n4cg9_9afd2484-54f9-4dd9-b081-6537c075864f/extract-content/0.log" Nov 28 11:10:42 crc kubenswrapper[4838]: I1128 11:10:42.382419 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-6mmw4_b0c9680e-7b0a-47a9-87dc-4da8cfbfce77/marketplace-operator/0.log" Nov 28 11:10:42 crc kubenswrapper[4838]: I1128 11:10:42.595109 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-trs27_1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8/extract-utilities/0.log" Nov 28 11:10:42 crc kubenswrapper[4838]: I1128 11:10:42.785737 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-n4cg9_9afd2484-54f9-4dd9-b081-6537c075864f/registry-server/0.log" Nov 28 11:10:42 crc kubenswrapper[4838]: I1128 11:10:42.812898 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-trs27_1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8/extract-content/0.log" Nov 28 11:10:42 crc kubenswrapper[4838]: I1128 11:10:42.816829 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-trs27_1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8/extract-content/0.log" Nov 28 11:10:42 crc kubenswrapper[4838]: I1128 11:10:42.840894 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-trs27_1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8/extract-utilities/0.log" Nov 28 11:10:42 crc kubenswrapper[4838]: I1128 11:10:42.939817 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-trs27_1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8/extract-utilities/0.log" Nov 28 11:10:42 crc kubenswrapper[4838]: I1128 11:10:42.960243 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-trs27_1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8/extract-content/0.log" Nov 28 11:10:43 crc kubenswrapper[4838]: I1128 11:10:43.097034 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-trs27_1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8/registry-server/0.log" Nov 28 11:10:43 crc kubenswrapper[4838]: I1128 11:10:43.100550 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-cmcj8_01087280-c77b-4764-91ba-468b21f32427/extract-utilities/0.log" Nov 28 11:10:43 crc kubenswrapper[4838]: I1128 11:10:43.446776 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-cmcj8_01087280-c77b-4764-91ba-468b21f32427/extract-utilities/0.log" Nov 28 11:10:43 crc kubenswrapper[4838]: I1128 11:10:43.476971 4838 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-operators-cmcj8_01087280-c77b-4764-91ba-468b21f32427/extract-content/0.log" Nov 28 11:10:43 crc kubenswrapper[4838]: I1128 11:10:43.507058 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-cmcj8_01087280-c77b-4764-91ba-468b21f32427/extract-content/0.log" Nov 28 11:10:43 crc kubenswrapper[4838]: I1128 11:10:43.584691 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-cmcj8_01087280-c77b-4764-91ba-468b21f32427/extract-utilities/0.log" Nov 28 11:10:43 crc kubenswrapper[4838]: I1128 11:10:43.623078 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-cmcj8_01087280-c77b-4764-91ba-468b21f32427/extract-content/0.log" Nov 28 11:10:44 crc kubenswrapper[4838]: I1128 11:10:44.077141 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-cmcj8_01087280-c77b-4764-91ba-468b21f32427/registry-server/0.log" Nov 28 11:10:47 crc kubenswrapper[4838]: I1128 11:10:47.949694 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-sv4qj"] Nov 28 11:10:47 crc kubenswrapper[4838]: E1128 11:10:47.952874 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a453c56f-0a7e-413d-b0e0-38245cc7c304" containerName="registry-server" Nov 28 11:10:47 crc kubenswrapper[4838]: I1128 11:10:47.953074 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="a453c56f-0a7e-413d-b0e0-38245cc7c304" containerName="registry-server" Nov 28 11:10:47 crc kubenswrapper[4838]: E1128 11:10:47.953246 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="371702db-cfa9-469a-b1cb-17eb5aebdada" containerName="container-00" Nov 28 11:10:47 crc kubenswrapper[4838]: I1128 11:10:47.953374 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="371702db-cfa9-469a-b1cb-17eb5aebdada" containerName="container-00" Nov 28 11:10:47 crc kubenswrapper[4838]: E1128 11:10:47.953521 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a453c56f-0a7e-413d-b0e0-38245cc7c304" containerName="extract-utilities" Nov 28 11:10:47 crc kubenswrapper[4838]: I1128 11:10:47.953642 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="a453c56f-0a7e-413d-b0e0-38245cc7c304" containerName="extract-utilities" Nov 28 11:10:47 crc kubenswrapper[4838]: E1128 11:10:47.953841 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a453c56f-0a7e-413d-b0e0-38245cc7c304" containerName="extract-content" Nov 28 11:10:47 crc kubenswrapper[4838]: I1128 11:10:47.953988 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="a453c56f-0a7e-413d-b0e0-38245cc7c304" containerName="extract-content" Nov 28 11:10:47 crc kubenswrapper[4838]: I1128 11:10:47.954481 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="371702db-cfa9-469a-b1cb-17eb5aebdada" containerName="container-00" Nov 28 11:10:47 crc kubenswrapper[4838]: I1128 11:10:47.954651 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="a453c56f-0a7e-413d-b0e0-38245cc7c304" containerName="registry-server" Nov 28 11:10:47 crc kubenswrapper[4838]: I1128 11:10:47.957352 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sv4qj" Nov 28 11:10:47 crc kubenswrapper[4838]: I1128 11:10:47.990530 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sv4qj"] Nov 28 11:10:48 crc kubenswrapper[4838]: I1128 11:10:48.028363 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3d5ed34-1560-48e6-9ce6-3603dd91f6c6-utilities\") pod \"certified-operators-sv4qj\" (UID: \"e3d5ed34-1560-48e6-9ce6-3603dd91f6c6\") " pod="openshift-marketplace/certified-operators-sv4qj" Nov 28 11:10:48 crc kubenswrapper[4838]: I1128 11:10:48.028542 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9p67\" (UniqueName: \"kubernetes.io/projected/e3d5ed34-1560-48e6-9ce6-3603dd91f6c6-kube-api-access-b9p67\") pod \"certified-operators-sv4qj\" (UID: \"e3d5ed34-1560-48e6-9ce6-3603dd91f6c6\") " pod="openshift-marketplace/certified-operators-sv4qj" Nov 28 11:10:48 crc kubenswrapper[4838]: I1128 11:10:48.028572 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3d5ed34-1560-48e6-9ce6-3603dd91f6c6-catalog-content\") pod \"certified-operators-sv4qj\" (UID: \"e3d5ed34-1560-48e6-9ce6-3603dd91f6c6\") " pod="openshift-marketplace/certified-operators-sv4qj" Nov 28 11:10:48 crc kubenswrapper[4838]: I1128 11:10:48.130036 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9p67\" (UniqueName: \"kubernetes.io/projected/e3d5ed34-1560-48e6-9ce6-3603dd91f6c6-kube-api-access-b9p67\") pod \"certified-operators-sv4qj\" (UID: \"e3d5ed34-1560-48e6-9ce6-3603dd91f6c6\") " pod="openshift-marketplace/certified-operators-sv4qj" Nov 28 11:10:48 crc kubenswrapper[4838]: I1128 11:10:48.130092 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3d5ed34-1560-48e6-9ce6-3603dd91f6c6-catalog-content\") pod \"certified-operators-sv4qj\" (UID: \"e3d5ed34-1560-48e6-9ce6-3603dd91f6c6\") " pod="openshift-marketplace/certified-operators-sv4qj" Nov 28 11:10:48 crc kubenswrapper[4838]: I1128 11:10:48.130215 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3d5ed34-1560-48e6-9ce6-3603dd91f6c6-utilities\") pod \"certified-operators-sv4qj\" (UID: \"e3d5ed34-1560-48e6-9ce6-3603dd91f6c6\") " pod="openshift-marketplace/certified-operators-sv4qj" Nov 28 11:10:48 crc kubenswrapper[4838]: I1128 11:10:48.130881 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3d5ed34-1560-48e6-9ce6-3603dd91f6c6-utilities\") pod \"certified-operators-sv4qj\" (UID: \"e3d5ed34-1560-48e6-9ce6-3603dd91f6c6\") " pod="openshift-marketplace/certified-operators-sv4qj" Nov 28 11:10:48 crc kubenswrapper[4838]: I1128 11:10:48.131247 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3d5ed34-1560-48e6-9ce6-3603dd91f6c6-catalog-content\") pod \"certified-operators-sv4qj\" (UID: \"e3d5ed34-1560-48e6-9ce6-3603dd91f6c6\") " pod="openshift-marketplace/certified-operators-sv4qj" Nov 28 11:10:48 crc kubenswrapper[4838]: I1128 11:10:48.170224 4838 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-b9p67\" (UniqueName: \"kubernetes.io/projected/e3d5ed34-1560-48e6-9ce6-3603dd91f6c6-kube-api-access-b9p67\") pod \"certified-operators-sv4qj\" (UID: \"e3d5ed34-1560-48e6-9ce6-3603dd91f6c6\") " pod="openshift-marketplace/certified-operators-sv4qj" Nov 28 11:10:48 crc kubenswrapper[4838]: I1128 11:10:48.282384 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sv4qj" Nov 28 11:10:48 crc kubenswrapper[4838]: I1128 11:10:48.849320 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sv4qj"] Nov 28 11:10:49 crc kubenswrapper[4838]: I1128 11:10:49.900509 4838 generic.go:334] "Generic (PLEG): container finished" podID="e3d5ed34-1560-48e6-9ce6-3603dd91f6c6" containerID="c62c0ee357f35ee040f5143a9ce628811688784cafceaeadc29a302492d942ce" exitCode=0 Nov 28 11:10:49 crc kubenswrapper[4838]: I1128 11:10:49.900644 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sv4qj" event={"ID":"e3d5ed34-1560-48e6-9ce6-3603dd91f6c6","Type":"ContainerDied","Data":"c62c0ee357f35ee040f5143a9ce628811688784cafceaeadc29a302492d942ce"} Nov 28 11:10:49 crc kubenswrapper[4838]: I1128 11:10:49.901114 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sv4qj" event={"ID":"e3d5ed34-1560-48e6-9ce6-3603dd91f6c6","Type":"ContainerStarted","Data":"a656490caaedf214c83106fb461909d35605d6741b38654930c6d00f1a70ff8c"} Nov 28 11:10:49 crc kubenswrapper[4838]: I1128 11:10:49.903404 4838 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 11:10:51 crc kubenswrapper[4838]: I1128 11:10:51.920980 4838 generic.go:334] "Generic (PLEG): container finished" podID="e3d5ed34-1560-48e6-9ce6-3603dd91f6c6" containerID="45de502b173e61af82c18794ea3632f4400d7dd18265526b638aef5b67a05e1b" exitCode=0 Nov 28 11:10:51 crc kubenswrapper[4838]: I1128 11:10:51.921222 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sv4qj" event={"ID":"e3d5ed34-1560-48e6-9ce6-3603dd91f6c6","Type":"ContainerDied","Data":"45de502b173e61af82c18794ea3632f4400d7dd18265526b638aef5b67a05e1b"} Nov 28 11:10:52 crc kubenswrapper[4838]: I1128 11:10:52.936833 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sv4qj" event={"ID":"e3d5ed34-1560-48e6-9ce6-3603dd91f6c6","Type":"ContainerStarted","Data":"3a3876c5f3289dc625eb294800c409b3fb920ef21729efa9a8658c301bf16a70"} Nov 28 11:10:52 crc kubenswrapper[4838]: I1128 11:10:52.963184 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-sv4qj" podStartSLOduration=3.452190441 podStartE2EDuration="5.963164701s" podCreationTimestamp="2025-11-28 11:10:47 +0000 UTC" firstStartedPulling="2025-11-28 11:10:49.903006464 +0000 UTC m=+4421.601980664" lastFinishedPulling="2025-11-28 11:10:52.413980714 +0000 UTC m=+4424.112954924" observedRunningTime="2025-11-28 11:10:52.95941488 +0000 UTC m=+4424.658389080" watchObservedRunningTime="2025-11-28 11:10:52.963164701 +0000 UTC m=+4424.662138871" Nov 28 11:10:58 crc kubenswrapper[4838]: I1128 11:10:58.283295 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-sv4qj" Nov 28 11:10:58 crc kubenswrapper[4838]: I1128 11:10:58.283779 4838 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-sv4qj" Nov 28 11:10:58 crc kubenswrapper[4838]: I1128 11:10:58.355590 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-sv4qj" Nov 28 11:10:59 crc kubenswrapper[4838]: I1128 11:10:59.047236 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-sv4qj" Nov 28 11:10:59 crc kubenswrapper[4838]: I1128 11:10:59.715568 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sv4qj"] Nov 28 11:11:01 crc kubenswrapper[4838]: I1128 11:11:01.020062 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-sv4qj" podUID="e3d5ed34-1560-48e6-9ce6-3603dd91f6c6" containerName="registry-server" containerID="cri-o://3a3876c5f3289dc625eb294800c409b3fb920ef21729efa9a8658c301bf16a70" gracePeriod=2 Nov 28 11:11:02 crc kubenswrapper[4838]: I1128 11:11:02.026911 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sv4qj" Nov 28 11:11:02 crc kubenswrapper[4838]: I1128 11:11:02.028522 4838 generic.go:334] "Generic (PLEG): container finished" podID="e3d5ed34-1560-48e6-9ce6-3603dd91f6c6" containerID="3a3876c5f3289dc625eb294800c409b3fb920ef21729efa9a8658c301bf16a70" exitCode=0 Nov 28 11:11:02 crc kubenswrapper[4838]: I1128 11:11:02.028871 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sv4qj" event={"ID":"e3d5ed34-1560-48e6-9ce6-3603dd91f6c6","Type":"ContainerDied","Data":"3a3876c5f3289dc625eb294800c409b3fb920ef21729efa9a8658c301bf16a70"} Nov 28 11:11:02 crc kubenswrapper[4838]: I1128 11:11:02.028996 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sv4qj" event={"ID":"e3d5ed34-1560-48e6-9ce6-3603dd91f6c6","Type":"ContainerDied","Data":"a656490caaedf214c83106fb461909d35605d6741b38654930c6d00f1a70ff8c"} Nov 28 11:11:02 crc kubenswrapper[4838]: I1128 11:11:02.029025 4838 scope.go:117] "RemoveContainer" containerID="3a3876c5f3289dc625eb294800c409b3fb920ef21729efa9a8658c301bf16a70" Nov 28 11:11:02 crc kubenswrapper[4838]: I1128 11:11:02.054689 4838 scope.go:117] "RemoveContainer" containerID="45de502b173e61af82c18794ea3632f4400d7dd18265526b638aef5b67a05e1b" Nov 28 11:11:02 crc kubenswrapper[4838]: I1128 11:11:02.157986 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3d5ed34-1560-48e6-9ce6-3603dd91f6c6-catalog-content\") pod \"e3d5ed34-1560-48e6-9ce6-3603dd91f6c6\" (UID: \"e3d5ed34-1560-48e6-9ce6-3603dd91f6c6\") " Nov 28 11:11:02 crc kubenswrapper[4838]: I1128 11:11:02.158181 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b9p67\" (UniqueName: \"kubernetes.io/projected/e3d5ed34-1560-48e6-9ce6-3603dd91f6c6-kube-api-access-b9p67\") pod \"e3d5ed34-1560-48e6-9ce6-3603dd91f6c6\" (UID: \"e3d5ed34-1560-48e6-9ce6-3603dd91f6c6\") " Nov 28 11:11:02 crc kubenswrapper[4838]: I1128 11:11:02.158256 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3d5ed34-1560-48e6-9ce6-3603dd91f6c6-utilities\") pod \"e3d5ed34-1560-48e6-9ce6-3603dd91f6c6\" (UID: \"e3d5ed34-1560-48e6-9ce6-3603dd91f6c6\") " Nov 
28 11:11:02 crc kubenswrapper[4838]: I1128 11:11:02.159585 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3d5ed34-1560-48e6-9ce6-3603dd91f6c6-utilities" (OuterVolumeSpecName: "utilities") pod "e3d5ed34-1560-48e6-9ce6-3603dd91f6c6" (UID: "e3d5ed34-1560-48e6-9ce6-3603dd91f6c6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:11:02 crc kubenswrapper[4838]: I1128 11:11:02.223955 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3d5ed34-1560-48e6-9ce6-3603dd91f6c6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e3d5ed34-1560-48e6-9ce6-3603dd91f6c6" (UID: "e3d5ed34-1560-48e6-9ce6-3603dd91f6c6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:11:02 crc kubenswrapper[4838]: I1128 11:11:02.260806 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3d5ed34-1560-48e6-9ce6-3603dd91f6c6-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:11:02 crc kubenswrapper[4838]: I1128 11:11:02.260834 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3d5ed34-1560-48e6-9ce6-3603dd91f6c6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:11:02 crc kubenswrapper[4838]: I1128 11:11:02.733983 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3d5ed34-1560-48e6-9ce6-3603dd91f6c6-kube-api-access-b9p67" (OuterVolumeSpecName: "kube-api-access-b9p67") pod "e3d5ed34-1560-48e6-9ce6-3603dd91f6c6" (UID: "e3d5ed34-1560-48e6-9ce6-3603dd91f6c6"). InnerVolumeSpecName "kube-api-access-b9p67". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:11:02 crc kubenswrapper[4838]: I1128 11:11:02.747858 4838 scope.go:117] "RemoveContainer" containerID="c62c0ee357f35ee040f5143a9ce628811688784cafceaeadc29a302492d942ce" Nov 28 11:11:02 crc kubenswrapper[4838]: I1128 11:11:02.772151 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b9p67\" (UniqueName: \"kubernetes.io/projected/e3d5ed34-1560-48e6-9ce6-3603dd91f6c6-kube-api-access-b9p67\") on node \"crc\" DevicePath \"\"" Nov 28 11:11:02 crc kubenswrapper[4838]: I1128 11:11:02.837529 4838 scope.go:117] "RemoveContainer" containerID="3a3876c5f3289dc625eb294800c409b3fb920ef21729efa9a8658c301bf16a70" Nov 28 11:11:02 crc kubenswrapper[4838]: E1128 11:11:02.838176 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a3876c5f3289dc625eb294800c409b3fb920ef21729efa9a8658c301bf16a70\": container with ID starting with 3a3876c5f3289dc625eb294800c409b3fb920ef21729efa9a8658c301bf16a70 not found: ID does not exist" containerID="3a3876c5f3289dc625eb294800c409b3fb920ef21729efa9a8658c301bf16a70" Nov 28 11:11:02 crc kubenswrapper[4838]: I1128 11:11:02.838361 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a3876c5f3289dc625eb294800c409b3fb920ef21729efa9a8658c301bf16a70"} err="failed to get container status \"3a3876c5f3289dc625eb294800c409b3fb920ef21729efa9a8658c301bf16a70\": rpc error: code = NotFound desc = could not find container \"3a3876c5f3289dc625eb294800c409b3fb920ef21729efa9a8658c301bf16a70\": container with ID starting with 3a3876c5f3289dc625eb294800c409b3fb920ef21729efa9a8658c301bf16a70 not found: ID does not exist" Nov 28 11:11:02 crc kubenswrapper[4838]: I1128 11:11:02.838450 4838 scope.go:117] "RemoveContainer" containerID="45de502b173e61af82c18794ea3632f4400d7dd18265526b638aef5b67a05e1b" Nov 28 11:11:02 crc kubenswrapper[4838]: E1128 11:11:02.838852 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45de502b173e61af82c18794ea3632f4400d7dd18265526b638aef5b67a05e1b\": container with ID starting with 45de502b173e61af82c18794ea3632f4400d7dd18265526b638aef5b67a05e1b not found: ID does not exist" containerID="45de502b173e61af82c18794ea3632f4400d7dd18265526b638aef5b67a05e1b" Nov 28 11:11:02 crc kubenswrapper[4838]: I1128 11:11:02.838893 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45de502b173e61af82c18794ea3632f4400d7dd18265526b638aef5b67a05e1b"} err="failed to get container status \"45de502b173e61af82c18794ea3632f4400d7dd18265526b638aef5b67a05e1b\": rpc error: code = NotFound desc = could not find container \"45de502b173e61af82c18794ea3632f4400d7dd18265526b638aef5b67a05e1b\": container with ID starting with 45de502b173e61af82c18794ea3632f4400d7dd18265526b638aef5b67a05e1b not found: ID does not exist" Nov 28 11:11:02 crc kubenswrapper[4838]: I1128 11:11:02.838919 4838 scope.go:117] "RemoveContainer" containerID="c62c0ee357f35ee040f5143a9ce628811688784cafceaeadc29a302492d942ce" Nov 28 11:11:02 crc kubenswrapper[4838]: E1128 11:11:02.839158 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c62c0ee357f35ee040f5143a9ce628811688784cafceaeadc29a302492d942ce\": container with ID starting with c62c0ee357f35ee040f5143a9ce628811688784cafceaeadc29a302492d942ce not found: ID does not 
exist" containerID="c62c0ee357f35ee040f5143a9ce628811688784cafceaeadc29a302492d942ce" Nov 28 11:11:02 crc kubenswrapper[4838]: I1128 11:11:02.839189 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c62c0ee357f35ee040f5143a9ce628811688784cafceaeadc29a302492d942ce"} err="failed to get container status \"c62c0ee357f35ee040f5143a9ce628811688784cafceaeadc29a302492d942ce\": rpc error: code = NotFound desc = could not find container \"c62c0ee357f35ee040f5143a9ce628811688784cafceaeadc29a302492d942ce\": container with ID starting with c62c0ee357f35ee040f5143a9ce628811688784cafceaeadc29a302492d942ce not found: ID does not exist" Nov 28 11:11:03 crc kubenswrapper[4838]: I1128 11:11:03.041266 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sv4qj" Nov 28 11:11:03 crc kubenswrapper[4838]: I1128 11:11:03.109655 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sv4qj"] Nov 28 11:11:03 crc kubenswrapper[4838]: I1128 11:11:03.119328 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-sv4qj"] Nov 28 11:11:04 crc kubenswrapper[4838]: I1128 11:11:04.596478 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3d5ed34-1560-48e6-9ce6-3603dd91f6c6" path="/var/lib/kubelet/pods/e3d5ed34-1560-48e6-9ce6-3603dd91f6c6/volumes" Nov 28 11:11:22 crc kubenswrapper[4838]: E1128 11:11:22.512529 4838 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.65:34850->38.102.83.65:35709: write tcp 38.102.83.65:34850->38.102.83.65:35709: write: broken pipe Nov 28 11:12:33 crc kubenswrapper[4838]: I1128 11:12:33.127330 4838 generic.go:334] "Generic (PLEG): container finished" podID="9a7f2e13-2522-459b-b286-862bd963b3c0" containerID="6631c11dec99d433705087191b0248fb087bdddde75c680f57e54cba44ac87f5" exitCode=0 Nov 28 11:12:33 crc kubenswrapper[4838]: I1128 11:12:33.127410 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7sfv7/must-gather-fhhqh" event={"ID":"9a7f2e13-2522-459b-b286-862bd963b3c0","Type":"ContainerDied","Data":"6631c11dec99d433705087191b0248fb087bdddde75c680f57e54cba44ac87f5"} Nov 28 11:12:33 crc kubenswrapper[4838]: I1128 11:12:33.129144 4838 scope.go:117] "RemoveContainer" containerID="6631c11dec99d433705087191b0248fb087bdddde75c680f57e54cba44ac87f5" Nov 28 11:12:33 crc kubenswrapper[4838]: I1128 11:12:33.958013 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-7sfv7_must-gather-fhhqh_9a7f2e13-2522-459b-b286-862bd963b3c0/gather/0.log" Nov 28 11:12:42 crc kubenswrapper[4838]: I1128 11:12:42.604765 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-7sfv7/must-gather-fhhqh"] Nov 28 11:12:42 crc kubenswrapper[4838]: I1128 11:12:42.605444 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-7sfv7/must-gather-fhhqh" podUID="9a7f2e13-2522-459b-b286-862bd963b3c0" containerName="copy" containerID="cri-o://b77b217163030e413da14c958372fdd9664d8625b8ecb50d337c048a42da2f09" gracePeriod=2 Nov 28 11:12:42 crc kubenswrapper[4838]: I1128 11:12:42.611571 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-7sfv7/must-gather-fhhqh"] Nov 28 11:12:43 crc kubenswrapper[4838]: I1128 11:12:43.154583 4838 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-must-gather-7sfv7_must-gather-fhhqh_9a7f2e13-2522-459b-b286-862bd963b3c0/copy/0.log" Nov 28 11:12:43 crc kubenswrapper[4838]: I1128 11:12:43.155468 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-7sfv7/must-gather-fhhqh" Nov 28 11:12:43 crc kubenswrapper[4838]: I1128 11:12:43.241150 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-khrpr\" (UniqueName: \"kubernetes.io/projected/9a7f2e13-2522-459b-b286-862bd963b3c0-kube-api-access-khrpr\") pod \"9a7f2e13-2522-459b-b286-862bd963b3c0\" (UID: \"9a7f2e13-2522-459b-b286-862bd963b3c0\") " Nov 28 11:12:43 crc kubenswrapper[4838]: I1128 11:12:43.249836 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-7sfv7_must-gather-fhhqh_9a7f2e13-2522-459b-b286-862bd963b3c0/copy/0.log" Nov 28 11:12:43 crc kubenswrapper[4838]: I1128 11:12:43.253185 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a7f2e13-2522-459b-b286-862bd963b3c0-kube-api-access-khrpr" (OuterVolumeSpecName: "kube-api-access-khrpr") pod "9a7f2e13-2522-459b-b286-862bd963b3c0" (UID: "9a7f2e13-2522-459b-b286-862bd963b3c0"). InnerVolumeSpecName "kube-api-access-khrpr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:12:43 crc kubenswrapper[4838]: I1128 11:12:43.253220 4838 generic.go:334] "Generic (PLEG): container finished" podID="9a7f2e13-2522-459b-b286-862bd963b3c0" containerID="b77b217163030e413da14c958372fdd9664d8625b8ecb50d337c048a42da2f09" exitCode=143 Nov 28 11:12:43 crc kubenswrapper[4838]: I1128 11:12:43.253270 4838 scope.go:117] "RemoveContainer" containerID="b77b217163030e413da14c958372fdd9664d8625b8ecb50d337c048a42da2f09" Nov 28 11:12:43 crc kubenswrapper[4838]: I1128 11:12:43.253666 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-7sfv7/must-gather-fhhqh" Nov 28 11:12:43 crc kubenswrapper[4838]: I1128 11:12:43.304183 4838 scope.go:117] "RemoveContainer" containerID="6631c11dec99d433705087191b0248fb087bdddde75c680f57e54cba44ac87f5" Nov 28 11:12:43 crc kubenswrapper[4838]: I1128 11:12:43.342979 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/9a7f2e13-2522-459b-b286-862bd963b3c0-must-gather-output\") pod \"9a7f2e13-2522-459b-b286-862bd963b3c0\" (UID: \"9a7f2e13-2522-459b-b286-862bd963b3c0\") " Nov 28 11:12:43 crc kubenswrapper[4838]: I1128 11:12:43.343997 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-khrpr\" (UniqueName: \"kubernetes.io/projected/9a7f2e13-2522-459b-b286-862bd963b3c0-kube-api-access-khrpr\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:43 crc kubenswrapper[4838]: I1128 11:12:43.397466 4838 scope.go:117] "RemoveContainer" containerID="b77b217163030e413da14c958372fdd9664d8625b8ecb50d337c048a42da2f09" Nov 28 11:12:43 crc kubenswrapper[4838]: E1128 11:12:43.399070 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b77b217163030e413da14c958372fdd9664d8625b8ecb50d337c048a42da2f09\": container with ID starting with b77b217163030e413da14c958372fdd9664d8625b8ecb50d337c048a42da2f09 not found: ID does not exist" containerID="b77b217163030e413da14c958372fdd9664d8625b8ecb50d337c048a42da2f09" Nov 28 11:12:43 crc kubenswrapper[4838]: I1128 11:12:43.399246 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b77b217163030e413da14c958372fdd9664d8625b8ecb50d337c048a42da2f09"} err="failed to get container status \"b77b217163030e413da14c958372fdd9664d8625b8ecb50d337c048a42da2f09\": rpc error: code = NotFound desc = could not find container \"b77b217163030e413da14c958372fdd9664d8625b8ecb50d337c048a42da2f09\": container with ID starting with b77b217163030e413da14c958372fdd9664d8625b8ecb50d337c048a42da2f09 not found: ID does not exist" Nov 28 11:12:43 crc kubenswrapper[4838]: I1128 11:12:43.399540 4838 scope.go:117] "RemoveContainer" containerID="6631c11dec99d433705087191b0248fb087bdddde75c680f57e54cba44ac87f5" Nov 28 11:12:43 crc kubenswrapper[4838]: E1128 11:12:43.400030 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6631c11dec99d433705087191b0248fb087bdddde75c680f57e54cba44ac87f5\": container with ID starting with 6631c11dec99d433705087191b0248fb087bdddde75c680f57e54cba44ac87f5 not found: ID does not exist" containerID="6631c11dec99d433705087191b0248fb087bdddde75c680f57e54cba44ac87f5" Nov 28 11:12:43 crc kubenswrapper[4838]: I1128 11:12:43.400073 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6631c11dec99d433705087191b0248fb087bdddde75c680f57e54cba44ac87f5"} err="failed to get container status \"6631c11dec99d433705087191b0248fb087bdddde75c680f57e54cba44ac87f5\": rpc error: code = NotFound desc = could not find container \"6631c11dec99d433705087191b0248fb087bdddde75c680f57e54cba44ac87f5\": container with ID starting with 6631c11dec99d433705087191b0248fb087bdddde75c680f57e54cba44ac87f5 not found: ID does not exist" Nov 28 11:12:43 crc kubenswrapper[4838]: I1128 11:12:43.512307 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/9a7f2e13-2522-459b-b286-862bd963b3c0-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "9a7f2e13-2522-459b-b286-862bd963b3c0" (UID: "9a7f2e13-2522-459b-b286-862bd963b3c0"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:12:43 crc kubenswrapper[4838]: I1128 11:12:43.549791 4838 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/9a7f2e13-2522-459b-b286-862bd963b3c0-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 28 11:12:44 crc kubenswrapper[4838]: I1128 11:12:44.578150 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a7f2e13-2522-459b-b286-862bd963b3c0" path="/var/lib/kubelet/pods/9a7f2e13-2522-459b-b286-862bd963b3c0/volumes" Nov 28 11:12:53 crc kubenswrapper[4838]: I1128 11:12:53.939768 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:12:53 crc kubenswrapper[4838]: I1128 11:12:53.940391 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:13:05 crc kubenswrapper[4838]: I1128 11:13:05.231559 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6qzm6"] Nov 28 11:13:05 crc kubenswrapper[4838]: E1128 11:13:05.234168 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3d5ed34-1560-48e6-9ce6-3603dd91f6c6" containerName="registry-server" Nov 28 11:13:05 crc kubenswrapper[4838]: I1128 11:13:05.234320 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3d5ed34-1560-48e6-9ce6-3603dd91f6c6" containerName="registry-server" Nov 28 11:13:05 crc kubenswrapper[4838]: E1128 11:13:05.234466 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3d5ed34-1560-48e6-9ce6-3603dd91f6c6" containerName="extract-content" Nov 28 11:13:05 crc kubenswrapper[4838]: I1128 11:13:05.234576 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3d5ed34-1560-48e6-9ce6-3603dd91f6c6" containerName="extract-content" Nov 28 11:13:05 crc kubenswrapper[4838]: E1128 11:13:05.234695 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3d5ed34-1560-48e6-9ce6-3603dd91f6c6" containerName="extract-utilities" Nov 28 11:13:05 crc kubenswrapper[4838]: I1128 11:13:05.234854 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3d5ed34-1560-48e6-9ce6-3603dd91f6c6" containerName="extract-utilities" Nov 28 11:13:05 crc kubenswrapper[4838]: E1128 11:13:05.234979 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a7f2e13-2522-459b-b286-862bd963b3c0" containerName="copy" Nov 28 11:13:05 crc kubenswrapper[4838]: I1128 11:13:05.235088 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a7f2e13-2522-459b-b286-862bd963b3c0" containerName="copy" Nov 28 11:13:05 crc kubenswrapper[4838]: E1128 11:13:05.235200 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a7f2e13-2522-459b-b286-862bd963b3c0" containerName="gather" Nov 28 11:13:05 crc 
kubenswrapper[4838]: I1128 11:13:05.235313 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a7f2e13-2522-459b-b286-862bd963b3c0" containerName="gather" Nov 28 11:13:05 crc kubenswrapper[4838]: I1128 11:13:05.235890 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a7f2e13-2522-459b-b286-862bd963b3c0" containerName="copy" Nov 28 11:13:05 crc kubenswrapper[4838]: I1128 11:13:05.236099 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3d5ed34-1560-48e6-9ce6-3603dd91f6c6" containerName="registry-server" Nov 28 11:13:05 crc kubenswrapper[4838]: I1128 11:13:05.236279 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a7f2e13-2522-459b-b286-862bd963b3c0" containerName="gather" Nov 28 11:13:05 crc kubenswrapper[4838]: I1128 11:13:05.239498 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6qzm6" Nov 28 11:13:05 crc kubenswrapper[4838]: I1128 11:13:05.252106 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6qzm6"] Nov 28 11:13:05 crc kubenswrapper[4838]: I1128 11:13:05.332039 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f86f57ec-180e-4821-be6c-fa840892373f-utilities\") pod \"redhat-operators-6qzm6\" (UID: \"f86f57ec-180e-4821-be6c-fa840892373f\") " pod="openshift-marketplace/redhat-operators-6qzm6" Nov 28 11:13:05 crc kubenswrapper[4838]: I1128 11:13:05.332102 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5mcx\" (UniqueName: \"kubernetes.io/projected/f86f57ec-180e-4821-be6c-fa840892373f-kube-api-access-m5mcx\") pod \"redhat-operators-6qzm6\" (UID: \"f86f57ec-180e-4821-be6c-fa840892373f\") " pod="openshift-marketplace/redhat-operators-6qzm6" Nov 28 11:13:05 crc kubenswrapper[4838]: I1128 11:13:05.332187 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f86f57ec-180e-4821-be6c-fa840892373f-catalog-content\") pod \"redhat-operators-6qzm6\" (UID: \"f86f57ec-180e-4821-be6c-fa840892373f\") " pod="openshift-marketplace/redhat-operators-6qzm6" Nov 28 11:13:05 crc kubenswrapper[4838]: I1128 11:13:05.433938 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f86f57ec-180e-4821-be6c-fa840892373f-utilities\") pod \"redhat-operators-6qzm6\" (UID: \"f86f57ec-180e-4821-be6c-fa840892373f\") " pod="openshift-marketplace/redhat-operators-6qzm6" Nov 28 11:13:05 crc kubenswrapper[4838]: I1128 11:13:05.434574 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f86f57ec-180e-4821-be6c-fa840892373f-utilities\") pod \"redhat-operators-6qzm6\" (UID: \"f86f57ec-180e-4821-be6c-fa840892373f\") " pod="openshift-marketplace/redhat-operators-6qzm6" Nov 28 11:13:05 crc kubenswrapper[4838]: I1128 11:13:05.434778 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5mcx\" (UniqueName: \"kubernetes.io/projected/f86f57ec-180e-4821-be6c-fa840892373f-kube-api-access-m5mcx\") pod \"redhat-operators-6qzm6\" (UID: \"f86f57ec-180e-4821-be6c-fa840892373f\") " pod="openshift-marketplace/redhat-operators-6qzm6" Nov 28 11:13:05 crc kubenswrapper[4838]: I1128 
11:13:05.434981 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f86f57ec-180e-4821-be6c-fa840892373f-catalog-content\") pod \"redhat-operators-6qzm6\" (UID: \"f86f57ec-180e-4821-be6c-fa840892373f\") " pod="openshift-marketplace/redhat-operators-6qzm6" Nov 28 11:13:05 crc kubenswrapper[4838]: I1128 11:13:05.435648 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f86f57ec-180e-4821-be6c-fa840892373f-catalog-content\") pod \"redhat-operators-6qzm6\" (UID: \"f86f57ec-180e-4821-be6c-fa840892373f\") " pod="openshift-marketplace/redhat-operators-6qzm6" Nov 28 11:13:05 crc kubenswrapper[4838]: I1128 11:13:05.456813 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5mcx\" (UniqueName: \"kubernetes.io/projected/f86f57ec-180e-4821-be6c-fa840892373f-kube-api-access-m5mcx\") pod \"redhat-operators-6qzm6\" (UID: \"f86f57ec-180e-4821-be6c-fa840892373f\") " pod="openshift-marketplace/redhat-operators-6qzm6" Nov 28 11:13:05 crc kubenswrapper[4838]: I1128 11:13:05.578093 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6qzm6" Nov 28 11:13:06 crc kubenswrapper[4838]: I1128 11:13:06.074975 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6qzm6"] Nov 28 11:13:06 crc kubenswrapper[4838]: I1128 11:13:06.502750 4838 generic.go:334] "Generic (PLEG): container finished" podID="f86f57ec-180e-4821-be6c-fa840892373f" containerID="ffe9713262982aa0892fa0ae942b2df545ecbcb68bb67a65d8231f89bd49f82d" exitCode=0 Nov 28 11:13:06 crc kubenswrapper[4838]: I1128 11:13:06.502796 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6qzm6" event={"ID":"f86f57ec-180e-4821-be6c-fa840892373f","Type":"ContainerDied","Data":"ffe9713262982aa0892fa0ae942b2df545ecbcb68bb67a65d8231f89bd49f82d"} Nov 28 11:13:06 crc kubenswrapper[4838]: I1128 11:13:06.502822 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6qzm6" event={"ID":"f86f57ec-180e-4821-be6c-fa840892373f","Type":"ContainerStarted","Data":"5a36a622bdb44ce42347601c2aecdb73a95a54595b7a4f158c2c687bfea1129e"} Nov 28 11:13:07 crc kubenswrapper[4838]: I1128 11:13:07.519194 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6qzm6" event={"ID":"f86f57ec-180e-4821-be6c-fa840892373f","Type":"ContainerStarted","Data":"06c3f7d0ca8b2d8cc058897fa27b98021f07ee21cae3eea7ea72f23a6aca135b"} Nov 28 11:13:09 crc kubenswrapper[4838]: I1128 11:13:09.538869 4838 generic.go:334] "Generic (PLEG): container finished" podID="f86f57ec-180e-4821-be6c-fa840892373f" containerID="06c3f7d0ca8b2d8cc058897fa27b98021f07ee21cae3eea7ea72f23a6aca135b" exitCode=0 Nov 28 11:13:09 crc kubenswrapper[4838]: I1128 11:13:09.538976 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6qzm6" event={"ID":"f86f57ec-180e-4821-be6c-fa840892373f","Type":"ContainerDied","Data":"06c3f7d0ca8b2d8cc058897fa27b98021f07ee21cae3eea7ea72f23a6aca135b"} Nov 28 11:13:10 crc kubenswrapper[4838]: I1128 11:13:10.551971 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6qzm6" 
event={"ID":"f86f57ec-180e-4821-be6c-fa840892373f","Type":"ContainerStarted","Data":"f42c116edb2fbc7a3ce2e830995b90f0b5658b523a0a00124976b77724c83e1a"} Nov 28 11:13:10 crc kubenswrapper[4838]: I1128 11:13:10.585635 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6qzm6" podStartSLOduration=1.94860451 podStartE2EDuration="5.585608307s" podCreationTimestamp="2025-11-28 11:13:05 +0000 UTC" firstStartedPulling="2025-11-28 11:13:06.504582005 +0000 UTC m=+4558.203556175" lastFinishedPulling="2025-11-28 11:13:10.141585792 +0000 UTC m=+4561.840559972" observedRunningTime="2025-11-28 11:13:10.58167783 +0000 UTC m=+4562.280652000" watchObservedRunningTime="2025-11-28 11:13:10.585608307 +0000 UTC m=+4562.284582507" Nov 28 11:13:15 crc kubenswrapper[4838]: I1128 11:13:15.579039 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6qzm6" Nov 28 11:13:15 crc kubenswrapper[4838]: I1128 11:13:15.579377 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6qzm6" Nov 28 11:13:16 crc kubenswrapper[4838]: I1128 11:13:16.623010 4838 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6qzm6" podUID="f86f57ec-180e-4821-be6c-fa840892373f" containerName="registry-server" probeResult="failure" output=< Nov 28 11:13:16 crc kubenswrapper[4838]: timeout: failed to connect service ":50051" within 1s Nov 28 11:13:16 crc kubenswrapper[4838]: > Nov 28 11:13:23 crc kubenswrapper[4838]: I1128 11:13:23.940397 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:13:23 crc kubenswrapper[4838]: I1128 11:13:23.940997 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:13:25 crc kubenswrapper[4838]: I1128 11:13:25.628200 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6qzm6" Nov 28 11:13:25 crc kubenswrapper[4838]: I1128 11:13:25.683536 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6qzm6" Nov 28 11:13:25 crc kubenswrapper[4838]: I1128 11:13:25.869505 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6qzm6"] Nov 28 11:13:26 crc kubenswrapper[4838]: I1128 11:13:26.727323 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6qzm6" podUID="f86f57ec-180e-4821-be6c-fa840892373f" containerName="registry-server" containerID="cri-o://f42c116edb2fbc7a3ce2e830995b90f0b5658b523a0a00124976b77724c83e1a" gracePeriod=2 Nov 28 11:13:27 crc kubenswrapper[4838]: I1128 11:13:27.745111 4838 generic.go:334] "Generic (PLEG): container finished" podID="f86f57ec-180e-4821-be6c-fa840892373f" containerID="f42c116edb2fbc7a3ce2e830995b90f0b5658b523a0a00124976b77724c83e1a" exitCode=0 Nov 28 11:13:27 crc kubenswrapper[4838]: I1128 11:13:27.745368 
4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6qzm6" event={"ID":"f86f57ec-180e-4821-be6c-fa840892373f","Type":"ContainerDied","Data":"f42c116edb2fbc7a3ce2e830995b90f0b5658b523a0a00124976b77724c83e1a"} Nov 28 11:13:27 crc kubenswrapper[4838]: I1128 11:13:27.964257 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6qzm6" Nov 28 11:13:28 crc kubenswrapper[4838]: I1128 11:13:28.049062 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f86f57ec-180e-4821-be6c-fa840892373f-utilities\") pod \"f86f57ec-180e-4821-be6c-fa840892373f\" (UID: \"f86f57ec-180e-4821-be6c-fa840892373f\") " Nov 28 11:13:28 crc kubenswrapper[4838]: I1128 11:13:28.049236 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f86f57ec-180e-4821-be6c-fa840892373f-catalog-content\") pod \"f86f57ec-180e-4821-be6c-fa840892373f\" (UID: \"f86f57ec-180e-4821-be6c-fa840892373f\") " Nov 28 11:13:28 crc kubenswrapper[4838]: I1128 11:13:28.049293 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m5mcx\" (UniqueName: \"kubernetes.io/projected/f86f57ec-180e-4821-be6c-fa840892373f-kube-api-access-m5mcx\") pod \"f86f57ec-180e-4821-be6c-fa840892373f\" (UID: \"f86f57ec-180e-4821-be6c-fa840892373f\") " Nov 28 11:13:28 crc kubenswrapper[4838]: I1128 11:13:28.050274 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f86f57ec-180e-4821-be6c-fa840892373f-utilities" (OuterVolumeSpecName: "utilities") pod "f86f57ec-180e-4821-be6c-fa840892373f" (UID: "f86f57ec-180e-4821-be6c-fa840892373f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:13:28 crc kubenswrapper[4838]: I1128 11:13:28.062116 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f86f57ec-180e-4821-be6c-fa840892373f-kube-api-access-m5mcx" (OuterVolumeSpecName: "kube-api-access-m5mcx") pod "f86f57ec-180e-4821-be6c-fa840892373f" (UID: "f86f57ec-180e-4821-be6c-fa840892373f"). InnerVolumeSpecName "kube-api-access-m5mcx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:13:28 crc kubenswrapper[4838]: I1128 11:13:28.151968 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f86f57ec-180e-4821-be6c-fa840892373f-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:13:28 crc kubenswrapper[4838]: I1128 11:13:28.152320 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m5mcx\" (UniqueName: \"kubernetes.io/projected/f86f57ec-180e-4821-be6c-fa840892373f-kube-api-access-m5mcx\") on node \"crc\" DevicePath \"\"" Nov 28 11:13:28 crc kubenswrapper[4838]: I1128 11:13:28.163359 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f86f57ec-180e-4821-be6c-fa840892373f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f86f57ec-180e-4821-be6c-fa840892373f" (UID: "f86f57ec-180e-4821-be6c-fa840892373f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:13:28 crc kubenswrapper[4838]: I1128 11:13:28.254280 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f86f57ec-180e-4821-be6c-fa840892373f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:13:28 crc kubenswrapper[4838]: I1128 11:13:28.761207 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6qzm6" event={"ID":"f86f57ec-180e-4821-be6c-fa840892373f","Type":"ContainerDied","Data":"5a36a622bdb44ce42347601c2aecdb73a95a54595b7a4f158c2c687bfea1129e"} Nov 28 11:13:28 crc kubenswrapper[4838]: I1128 11:13:28.761275 4838 scope.go:117] "RemoveContainer" containerID="f42c116edb2fbc7a3ce2e830995b90f0b5658b523a0a00124976b77724c83e1a" Nov 28 11:13:28 crc kubenswrapper[4838]: I1128 11:13:28.761916 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6qzm6" Nov 28 11:13:28 crc kubenswrapper[4838]: I1128 11:13:28.799431 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6qzm6"] Nov 28 11:13:28 crc kubenswrapper[4838]: I1128 11:13:28.807464 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6qzm6"] Nov 28 11:13:28 crc kubenswrapper[4838]: I1128 11:13:28.822221 4838 scope.go:117] "RemoveContainer" containerID="06c3f7d0ca8b2d8cc058897fa27b98021f07ee21cae3eea7ea72f23a6aca135b" Nov 28 11:13:28 crc kubenswrapper[4838]: I1128 11:13:28.849565 4838 scope.go:117] "RemoveContainer" containerID="ffe9713262982aa0892fa0ae942b2df545ecbcb68bb67a65d8231f89bd49f82d" Nov 28 11:13:29 crc kubenswrapper[4838]: I1128 11:13:29.323434 4838 scope.go:117] "RemoveContainer" containerID="c7408febbdb6fa366fe943a61a93036b19c8c2e8cfa85d1af0d7b0d2e56fefce" Nov 28 11:13:30 crc kubenswrapper[4838]: I1128 11:13:30.572982 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f86f57ec-180e-4821-be6c-fa840892373f" path="/var/lib/kubelet/pods/f86f57ec-180e-4821-be6c-fa840892373f/volumes" Nov 28 11:13:53 crc kubenswrapper[4838]: I1128 11:13:53.940364 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:13:53 crc kubenswrapper[4838]: I1128 11:13:53.940918 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:13:53 crc kubenswrapper[4838]: I1128 11:13:53.940976 4838 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 11:13:53 crc kubenswrapper[4838]: I1128 11:13:53.941948 4838 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e"} pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 11:13:53 crc 
kubenswrapper[4838]: I1128 11:13:53.942022 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" containerID="cri-o://b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e" gracePeriod=600 Nov 28 11:13:54 crc kubenswrapper[4838]: E1128 11:13:54.073286 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:13:55 crc kubenswrapper[4838]: I1128 11:13:55.066767 4838 generic.go:334] "Generic (PLEG): container finished" podID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e" exitCode=0 Nov 28 11:13:55 crc kubenswrapper[4838]: I1128 11:13:55.066796 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerDied","Data":"b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e"} Nov 28 11:13:55 crc kubenswrapper[4838]: I1128 11:13:55.067257 4838 scope.go:117] "RemoveContainer" containerID="6deb95eccfb366ee3b0f8143f9aea63dc132d0fd50914fed82f0caf61f74a268" Nov 28 11:13:55 crc kubenswrapper[4838]: I1128 11:13:55.068374 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e" Nov 28 11:13:55 crc kubenswrapper[4838]: E1128 11:13:55.069083 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:14:05 crc kubenswrapper[4838]: I1128 11:14:05.562255 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e" Nov 28 11:14:05 crc kubenswrapper[4838]: E1128 11:14:05.563111 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:14:17 crc kubenswrapper[4838]: I1128 11:14:17.562303 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e" Nov 28 11:14:17 crc kubenswrapper[4838]: E1128 11:14:17.563156 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:14:29 crc kubenswrapper[4838]: I1128 11:14:29.921531 4838 scope.go:117] "RemoveContainer" containerID="6d1319f9e3f708b9911f59a71b6ec8210be5630e1569647e6ef6f76b8273bf90" Nov 28 11:14:31 crc kubenswrapper[4838]: I1128 11:14:31.563430 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e" Nov 28 11:14:31 crc kubenswrapper[4838]: E1128 11:14:31.564118 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:14:44 crc kubenswrapper[4838]: I1128 11:14:44.563204 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e" Nov 28 11:14:44 crc kubenswrapper[4838]: E1128 11:14:44.564245 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:14:55 crc kubenswrapper[4838]: I1128 11:14:55.563242 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e" Nov 28 11:14:55 crc kubenswrapper[4838]: E1128 11:14:55.564532 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:15:00 crc kubenswrapper[4838]: I1128 11:15:00.174385 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405475-8mp6s"] Nov 28 11:15:00 crc kubenswrapper[4838]: E1128 11:15:00.175334 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f86f57ec-180e-4821-be6c-fa840892373f" containerName="extract-content" Nov 28 11:15:00 crc kubenswrapper[4838]: I1128 11:15:00.175351 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="f86f57ec-180e-4821-be6c-fa840892373f" containerName="extract-content" Nov 28 11:15:00 crc kubenswrapper[4838]: E1128 11:15:00.175375 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f86f57ec-180e-4821-be6c-fa840892373f" containerName="extract-utilities" Nov 28 11:15:00 crc kubenswrapper[4838]: I1128 11:15:00.175382 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="f86f57ec-180e-4821-be6c-fa840892373f" containerName="extract-utilities" Nov 28 11:15:00 crc kubenswrapper[4838]: E1128 11:15:00.175405 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f86f57ec-180e-4821-be6c-fa840892373f" containerName="registry-server" Nov 28 11:15:00 crc kubenswrapper[4838]: I1128 
11:15:00.175412 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="f86f57ec-180e-4821-be6c-fa840892373f" containerName="registry-server" Nov 28 11:15:00 crc kubenswrapper[4838]: I1128 11:15:00.175621 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="f86f57ec-180e-4821-be6c-fa840892373f" containerName="registry-server" Nov 28 11:15:00 crc kubenswrapper[4838]: I1128 11:15:00.176373 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-8mp6s" Nov 28 11:15:00 crc kubenswrapper[4838]: I1128 11:15:00.179264 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 11:15:00 crc kubenswrapper[4838]: I1128 11:15:00.179517 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 11:15:00 crc kubenswrapper[4838]: I1128 11:15:00.194873 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405475-8mp6s"] Nov 28 11:15:00 crc kubenswrapper[4838]: I1128 11:15:00.319164 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llc9l\" (UniqueName: \"kubernetes.io/projected/d77859b1-b60e-4249-998e-3fc94bfe7a31-kube-api-access-llc9l\") pod \"collect-profiles-29405475-8mp6s\" (UID: \"d77859b1-b60e-4249-998e-3fc94bfe7a31\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-8mp6s" Nov 28 11:15:00 crc kubenswrapper[4838]: I1128 11:15:00.319313 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d77859b1-b60e-4249-998e-3fc94bfe7a31-config-volume\") pod \"collect-profiles-29405475-8mp6s\" (UID: \"d77859b1-b60e-4249-998e-3fc94bfe7a31\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-8mp6s" Nov 28 11:15:00 crc kubenswrapper[4838]: I1128 11:15:00.319402 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d77859b1-b60e-4249-998e-3fc94bfe7a31-secret-volume\") pod \"collect-profiles-29405475-8mp6s\" (UID: \"d77859b1-b60e-4249-998e-3fc94bfe7a31\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-8mp6s" Nov 28 11:15:00 crc kubenswrapper[4838]: I1128 11:15:00.421913 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llc9l\" (UniqueName: \"kubernetes.io/projected/d77859b1-b60e-4249-998e-3fc94bfe7a31-kube-api-access-llc9l\") pod \"collect-profiles-29405475-8mp6s\" (UID: \"d77859b1-b60e-4249-998e-3fc94bfe7a31\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-8mp6s" Nov 28 11:15:00 crc kubenswrapper[4838]: I1128 11:15:00.422010 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d77859b1-b60e-4249-998e-3fc94bfe7a31-config-volume\") pod \"collect-profiles-29405475-8mp6s\" (UID: \"d77859b1-b60e-4249-998e-3fc94bfe7a31\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-8mp6s" Nov 28 11:15:00 crc kubenswrapper[4838]: I1128 11:15:00.422060 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/d77859b1-b60e-4249-998e-3fc94bfe7a31-secret-volume\") pod \"collect-profiles-29405475-8mp6s\" (UID: \"d77859b1-b60e-4249-998e-3fc94bfe7a31\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-8mp6s" Nov 28 11:15:00 crc kubenswrapper[4838]: I1128 11:15:00.423564 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d77859b1-b60e-4249-998e-3fc94bfe7a31-config-volume\") pod \"collect-profiles-29405475-8mp6s\" (UID: \"d77859b1-b60e-4249-998e-3fc94bfe7a31\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-8mp6s" Nov 28 11:15:00 crc kubenswrapper[4838]: I1128 11:15:00.429662 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d77859b1-b60e-4249-998e-3fc94bfe7a31-secret-volume\") pod \"collect-profiles-29405475-8mp6s\" (UID: \"d77859b1-b60e-4249-998e-3fc94bfe7a31\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-8mp6s" Nov 28 11:15:00 crc kubenswrapper[4838]: I1128 11:15:00.455168 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llc9l\" (UniqueName: \"kubernetes.io/projected/d77859b1-b60e-4249-998e-3fc94bfe7a31-kube-api-access-llc9l\") pod \"collect-profiles-29405475-8mp6s\" (UID: \"d77859b1-b60e-4249-998e-3fc94bfe7a31\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-8mp6s" Nov 28 11:15:00 crc kubenswrapper[4838]: I1128 11:15:00.510160 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-8mp6s" Nov 28 11:15:00 crc kubenswrapper[4838]: I1128 11:15:00.995834 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405475-8mp6s"] Nov 28 11:15:01 crc kubenswrapper[4838]: I1128 11:15:01.773435 4838 generic.go:334] "Generic (PLEG): container finished" podID="d77859b1-b60e-4249-998e-3fc94bfe7a31" containerID="6e7292f22e1c68e211c0e38cf5ff539dba450012435957a3898505e59fb6b679" exitCode=0 Nov 28 11:15:01 crc kubenswrapper[4838]: I1128 11:15:01.773940 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-8mp6s" event={"ID":"d77859b1-b60e-4249-998e-3fc94bfe7a31","Type":"ContainerDied","Data":"6e7292f22e1c68e211c0e38cf5ff539dba450012435957a3898505e59fb6b679"} Nov 28 11:15:01 crc kubenswrapper[4838]: I1128 11:15:01.773991 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-8mp6s" event={"ID":"d77859b1-b60e-4249-998e-3fc94bfe7a31","Type":"ContainerStarted","Data":"7a63d7f041a0a7d92977e7de4ec70fbea824c301d1d6db2546fda35bf5120d7d"} Nov 28 11:15:03 crc kubenswrapper[4838]: I1128 11:15:03.487213 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-8mp6s" Nov 28 11:15:03 crc kubenswrapper[4838]: I1128 11:15:03.594226 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d77859b1-b60e-4249-998e-3fc94bfe7a31-secret-volume\") pod \"d77859b1-b60e-4249-998e-3fc94bfe7a31\" (UID: \"d77859b1-b60e-4249-998e-3fc94bfe7a31\") " Nov 28 11:15:03 crc kubenswrapper[4838]: I1128 11:15:03.594408 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-llc9l\" (UniqueName: \"kubernetes.io/projected/d77859b1-b60e-4249-998e-3fc94bfe7a31-kube-api-access-llc9l\") pod \"d77859b1-b60e-4249-998e-3fc94bfe7a31\" (UID: \"d77859b1-b60e-4249-998e-3fc94bfe7a31\") " Nov 28 11:15:03 crc kubenswrapper[4838]: I1128 11:15:03.594453 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d77859b1-b60e-4249-998e-3fc94bfe7a31-config-volume\") pod \"d77859b1-b60e-4249-998e-3fc94bfe7a31\" (UID: \"d77859b1-b60e-4249-998e-3fc94bfe7a31\") " Nov 28 11:15:03 crc kubenswrapper[4838]: I1128 11:15:03.595748 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d77859b1-b60e-4249-998e-3fc94bfe7a31-config-volume" (OuterVolumeSpecName: "config-volume") pod "d77859b1-b60e-4249-998e-3fc94bfe7a31" (UID: "d77859b1-b60e-4249-998e-3fc94bfe7a31"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 11:15:03 crc kubenswrapper[4838]: I1128 11:15:03.601124 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d77859b1-b60e-4249-998e-3fc94bfe7a31-kube-api-access-llc9l" (OuterVolumeSpecName: "kube-api-access-llc9l") pod "d77859b1-b60e-4249-998e-3fc94bfe7a31" (UID: "d77859b1-b60e-4249-998e-3fc94bfe7a31"). InnerVolumeSpecName "kube-api-access-llc9l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:15:03 crc kubenswrapper[4838]: I1128 11:15:03.602205 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d77859b1-b60e-4249-998e-3fc94bfe7a31-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d77859b1-b60e-4249-998e-3fc94bfe7a31" (UID: "d77859b1-b60e-4249-998e-3fc94bfe7a31"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 11:15:03 crc kubenswrapper[4838]: I1128 11:15:03.697779 4838 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d77859b1-b60e-4249-998e-3fc94bfe7a31-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 11:15:03 crc kubenswrapper[4838]: I1128 11:15:03.697828 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-llc9l\" (UniqueName: \"kubernetes.io/projected/d77859b1-b60e-4249-998e-3fc94bfe7a31-kube-api-access-llc9l\") on node \"crc\" DevicePath \"\"" Nov 28 11:15:03 crc kubenswrapper[4838]: I1128 11:15:03.697842 4838 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d77859b1-b60e-4249-998e-3fc94bfe7a31-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 11:15:03 crc kubenswrapper[4838]: I1128 11:15:03.805067 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-8mp6s" event={"ID":"d77859b1-b60e-4249-998e-3fc94bfe7a31","Type":"ContainerDied","Data":"7a63d7f041a0a7d92977e7de4ec70fbea824c301d1d6db2546fda35bf5120d7d"} Nov 28 11:15:03 crc kubenswrapper[4838]: I1128 11:15:03.805108 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7a63d7f041a0a7d92977e7de4ec70fbea824c301d1d6db2546fda35bf5120d7d" Nov 28 11:15:03 crc kubenswrapper[4838]: I1128 11:15:03.805163 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405475-8mp6s" Nov 28 11:15:04 crc kubenswrapper[4838]: I1128 11:15:04.574551 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405430-q6b6w"] Nov 28 11:15:04 crc kubenswrapper[4838]: I1128 11:15:04.576188 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405430-q6b6w"] Nov 28 11:15:06 crc kubenswrapper[4838]: I1128 11:15:06.562414 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e" Nov 28 11:15:06 crc kubenswrapper[4838]: E1128 11:15:06.564749 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:15:06 crc kubenswrapper[4838]: I1128 11:15:06.583566 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87f04482-860f-49ec-ade3-aebc08f2887c" path="/var/lib/kubelet/pods/87f04482-860f-49ec-ade3-aebc08f2887c/volumes" Nov 28 11:15:14 crc kubenswrapper[4838]: I1128 11:15:14.026437 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-r69n8/must-gather-mk428"] Nov 28 11:15:14 crc kubenswrapper[4838]: E1128 11:15:14.029504 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d77859b1-b60e-4249-998e-3fc94bfe7a31" containerName="collect-profiles" Nov 28 11:15:14 crc kubenswrapper[4838]: I1128 11:15:14.029536 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="d77859b1-b60e-4249-998e-3fc94bfe7a31" containerName="collect-profiles" Nov 28 11:15:14 crc 
kubenswrapper[4838]: I1128 11:15:14.029799 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="d77859b1-b60e-4249-998e-3fc94bfe7a31" containerName="collect-profiles" Nov 28 11:15:14 crc kubenswrapper[4838]: I1128 11:15:14.031209 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-r69n8/must-gather-mk428" Nov 28 11:15:14 crc kubenswrapper[4838]: I1128 11:15:14.033666 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-r69n8"/"openshift-service-ca.crt" Nov 28 11:15:14 crc kubenswrapper[4838]: I1128 11:15:14.034000 4838 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-r69n8"/"default-dockercfg-jvqrs" Nov 28 11:15:14 crc kubenswrapper[4838]: I1128 11:15:14.034288 4838 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-r69n8"/"kube-root-ca.crt" Nov 28 11:15:14 crc kubenswrapper[4838]: I1128 11:15:14.052769 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-r69n8/must-gather-mk428"] Nov 28 11:15:14 crc kubenswrapper[4838]: I1128 11:15:14.123461 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vnv48\" (UniqueName: \"kubernetes.io/projected/573376ae-377c-42ef-96d5-4ff9704f3f4a-kube-api-access-vnv48\") pod \"must-gather-mk428\" (UID: \"573376ae-377c-42ef-96d5-4ff9704f3f4a\") " pod="openshift-must-gather-r69n8/must-gather-mk428" Nov 28 11:15:14 crc kubenswrapper[4838]: I1128 11:15:14.123742 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/573376ae-377c-42ef-96d5-4ff9704f3f4a-must-gather-output\") pod \"must-gather-mk428\" (UID: \"573376ae-377c-42ef-96d5-4ff9704f3f4a\") " pod="openshift-must-gather-r69n8/must-gather-mk428" Nov 28 11:15:14 crc kubenswrapper[4838]: I1128 11:15:14.225772 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/573376ae-377c-42ef-96d5-4ff9704f3f4a-must-gather-output\") pod \"must-gather-mk428\" (UID: \"573376ae-377c-42ef-96d5-4ff9704f3f4a\") " pod="openshift-must-gather-r69n8/must-gather-mk428" Nov 28 11:15:14 crc kubenswrapper[4838]: I1128 11:15:14.225833 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vnv48\" (UniqueName: \"kubernetes.io/projected/573376ae-377c-42ef-96d5-4ff9704f3f4a-kube-api-access-vnv48\") pod \"must-gather-mk428\" (UID: \"573376ae-377c-42ef-96d5-4ff9704f3f4a\") " pod="openshift-must-gather-r69n8/must-gather-mk428" Nov 28 11:15:14 crc kubenswrapper[4838]: I1128 11:15:14.226403 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/573376ae-377c-42ef-96d5-4ff9704f3f4a-must-gather-output\") pod \"must-gather-mk428\" (UID: \"573376ae-377c-42ef-96d5-4ff9704f3f4a\") " pod="openshift-must-gather-r69n8/must-gather-mk428" Nov 28 11:15:14 crc kubenswrapper[4838]: I1128 11:15:14.253675 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vnv48\" (UniqueName: \"kubernetes.io/projected/573376ae-377c-42ef-96d5-4ff9704f3f4a-kube-api-access-vnv48\") pod \"must-gather-mk428\" (UID: \"573376ae-377c-42ef-96d5-4ff9704f3f4a\") " pod="openshift-must-gather-r69n8/must-gather-mk428" Nov 28 11:15:14 crc kubenswrapper[4838]: I1128 
11:15:14.349790 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-r69n8/must-gather-mk428" Nov 28 11:15:14 crc kubenswrapper[4838]: I1128 11:15:14.827487 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-r69n8/must-gather-mk428"] Nov 28 11:15:14 crc kubenswrapper[4838]: I1128 11:15:14.953662 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-r69n8/must-gather-mk428" event={"ID":"573376ae-377c-42ef-96d5-4ff9704f3f4a","Type":"ContainerStarted","Data":"1d9478339b36043b7bc9d049b32e5b2c59fac1d42cf620da4c5484c1f2903514"} Nov 28 11:15:15 crc kubenswrapper[4838]: I1128 11:15:15.967334 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-r69n8/must-gather-mk428" event={"ID":"573376ae-377c-42ef-96d5-4ff9704f3f4a","Type":"ContainerStarted","Data":"e4197b334988c6f15111214ffe1b9f981e1f23d2115db2b874a5a6d9059ca857"} Nov 28 11:15:16 crc kubenswrapper[4838]: I1128 11:15:16.980795 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-r69n8/must-gather-mk428" event={"ID":"573376ae-377c-42ef-96d5-4ff9704f3f4a","Type":"ContainerStarted","Data":"d0a2af1606bfb75df271044cea2d92f491499773e690921744b0ca73987b6bf6"} Nov 28 11:15:17 crc kubenswrapper[4838]: I1128 11:15:17.020297 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-r69n8/must-gather-mk428" podStartSLOduration=4.020270692 podStartE2EDuration="4.020270692s" podCreationTimestamp="2025-11-28 11:15:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:15:16.997594844 +0000 UTC m=+4688.696569014" watchObservedRunningTime="2025-11-28 11:15:17.020270692 +0000 UTC m=+4688.719244872" Nov 28 11:15:17 crc kubenswrapper[4838]: I1128 11:15:17.562636 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e" Nov 28 11:15:17 crc kubenswrapper[4838]: E1128 11:15:17.563706 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:15:20 crc kubenswrapper[4838]: I1128 11:15:20.116772 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-r69n8/crc-debug-zb922"] Nov 28 11:15:20 crc kubenswrapper[4838]: I1128 11:15:20.126047 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-r69n8/crc-debug-zb922" Nov 28 11:15:20 crc kubenswrapper[4838]: I1128 11:15:20.150017 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f91dd8da-9cf7-4742-be2b-e1cd6c763e96-host\") pod \"crc-debug-zb922\" (UID: \"f91dd8da-9cf7-4742-be2b-e1cd6c763e96\") " pod="openshift-must-gather-r69n8/crc-debug-zb922" Nov 28 11:15:20 crc kubenswrapper[4838]: I1128 11:15:20.150255 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2l44\" (UniqueName: \"kubernetes.io/projected/f91dd8da-9cf7-4742-be2b-e1cd6c763e96-kube-api-access-h2l44\") pod \"crc-debug-zb922\" (UID: \"f91dd8da-9cf7-4742-be2b-e1cd6c763e96\") " pod="openshift-must-gather-r69n8/crc-debug-zb922" Nov 28 11:15:20 crc kubenswrapper[4838]: I1128 11:15:20.252466 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f91dd8da-9cf7-4742-be2b-e1cd6c763e96-host\") pod \"crc-debug-zb922\" (UID: \"f91dd8da-9cf7-4742-be2b-e1cd6c763e96\") " pod="openshift-must-gather-r69n8/crc-debug-zb922" Nov 28 11:15:20 crc kubenswrapper[4838]: I1128 11:15:20.252563 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2l44\" (UniqueName: \"kubernetes.io/projected/f91dd8da-9cf7-4742-be2b-e1cd6c763e96-kube-api-access-h2l44\") pod \"crc-debug-zb922\" (UID: \"f91dd8da-9cf7-4742-be2b-e1cd6c763e96\") " pod="openshift-must-gather-r69n8/crc-debug-zb922" Nov 28 11:15:20 crc kubenswrapper[4838]: I1128 11:15:20.252604 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f91dd8da-9cf7-4742-be2b-e1cd6c763e96-host\") pod \"crc-debug-zb922\" (UID: \"f91dd8da-9cf7-4742-be2b-e1cd6c763e96\") " pod="openshift-must-gather-r69n8/crc-debug-zb922" Nov 28 11:15:20 crc kubenswrapper[4838]: I1128 11:15:20.276152 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2l44\" (UniqueName: \"kubernetes.io/projected/f91dd8da-9cf7-4742-be2b-e1cd6c763e96-kube-api-access-h2l44\") pod \"crc-debug-zb922\" (UID: \"f91dd8da-9cf7-4742-be2b-e1cd6c763e96\") " pod="openshift-must-gather-r69n8/crc-debug-zb922" Nov 28 11:15:20 crc kubenswrapper[4838]: I1128 11:15:20.457233 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-r69n8/crc-debug-zb922" Nov 28 11:15:20 crc kubenswrapper[4838]: W1128 11:15:20.482374 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf91dd8da_9cf7_4742_be2b_e1cd6c763e96.slice/crio-97085270257a3ec978ce44dc1961d78f137eedb41e805909c518e1fea615ebcc WatchSource:0}: Error finding container 97085270257a3ec978ce44dc1961d78f137eedb41e805909c518e1fea615ebcc: Status 404 returned error can't find the container with id 97085270257a3ec978ce44dc1961d78f137eedb41e805909c518e1fea615ebcc Nov 28 11:15:21 crc kubenswrapper[4838]: I1128 11:15:21.016591 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-r69n8/crc-debug-zb922" event={"ID":"f91dd8da-9cf7-4742-be2b-e1cd6c763e96","Type":"ContainerStarted","Data":"9394e44c4c8165997bec0a501a23bcf9e9f36962519e1de8ef6db8730c24a00b"} Nov 28 11:15:21 crc kubenswrapper[4838]: I1128 11:15:21.017425 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-r69n8/crc-debug-zb922" event={"ID":"f91dd8da-9cf7-4742-be2b-e1cd6c763e96","Type":"ContainerStarted","Data":"97085270257a3ec978ce44dc1961d78f137eedb41e805909c518e1fea615ebcc"} Nov 28 11:15:21 crc kubenswrapper[4838]: I1128 11:15:21.032198 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-r69n8/crc-debug-zb922" podStartSLOduration=1.032168792 podStartE2EDuration="1.032168792s" podCreationTimestamp="2025-11-28 11:15:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:15:21.029918641 +0000 UTC m=+4692.728892811" watchObservedRunningTime="2025-11-28 11:15:21.032168792 +0000 UTC m=+4692.731142962" Nov 28 11:15:29 crc kubenswrapper[4838]: I1128 11:15:29.562523 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e" Nov 28 11:15:29 crc kubenswrapper[4838]: E1128 11:15:29.563441 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:15:29 crc kubenswrapper[4838]: I1128 11:15:29.990207 4838 scope.go:117] "RemoveContainer" containerID="ed54aeed2cce2fcd18ac1cb31258dd61eaaebb97db5e04a6255d1c6ff9500798" Nov 28 11:15:40 crc kubenswrapper[4838]: I1128 11:15:40.561966 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e" Nov 28 11:15:40 crc kubenswrapper[4838]: E1128 11:15:40.562706 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:15:53 crc kubenswrapper[4838]: I1128 11:15:53.562241 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e" Nov 28 11:15:53 crc 
kubenswrapper[4838]: E1128 11:15:53.563024 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:15:59 crc kubenswrapper[4838]: I1128 11:15:59.424268 4838 generic.go:334] "Generic (PLEG): container finished" podID="f91dd8da-9cf7-4742-be2b-e1cd6c763e96" containerID="9394e44c4c8165997bec0a501a23bcf9e9f36962519e1de8ef6db8730c24a00b" exitCode=0 Nov 28 11:15:59 crc kubenswrapper[4838]: I1128 11:15:59.424362 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-r69n8/crc-debug-zb922" event={"ID":"f91dd8da-9cf7-4742-be2b-e1cd6c763e96","Type":"ContainerDied","Data":"9394e44c4c8165997bec0a501a23bcf9e9f36962519e1de8ef6db8730c24a00b"} Nov 28 11:16:00 crc kubenswrapper[4838]: I1128 11:16:00.565780 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-r69n8/crc-debug-zb922" Nov 28 11:16:00 crc kubenswrapper[4838]: I1128 11:16:00.625427 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-r69n8/crc-debug-zb922"] Nov 28 11:16:00 crc kubenswrapper[4838]: I1128 11:16:00.634287 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-r69n8/crc-debug-zb922"] Nov 28 11:16:00 crc kubenswrapper[4838]: I1128 11:16:00.693624 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f91dd8da-9cf7-4742-be2b-e1cd6c763e96-host\") pod \"f91dd8da-9cf7-4742-be2b-e1cd6c763e96\" (UID: \"f91dd8da-9cf7-4742-be2b-e1cd6c763e96\") " Nov 28 11:16:00 crc kubenswrapper[4838]: I1128 11:16:00.693783 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f91dd8da-9cf7-4742-be2b-e1cd6c763e96-host" (OuterVolumeSpecName: "host") pod "f91dd8da-9cf7-4742-be2b-e1cd6c763e96" (UID: "f91dd8da-9cf7-4742-be2b-e1cd6c763e96"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:16:00 crc kubenswrapper[4838]: I1128 11:16:00.693828 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h2l44\" (UniqueName: \"kubernetes.io/projected/f91dd8da-9cf7-4742-be2b-e1cd6c763e96-kube-api-access-h2l44\") pod \"f91dd8da-9cf7-4742-be2b-e1cd6c763e96\" (UID: \"f91dd8da-9cf7-4742-be2b-e1cd6c763e96\") " Nov 28 11:16:00 crc kubenswrapper[4838]: I1128 11:16:00.695078 4838 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f91dd8da-9cf7-4742-be2b-e1cd6c763e96-host\") on node \"crc\" DevicePath \"\"" Nov 28 11:16:00 crc kubenswrapper[4838]: I1128 11:16:00.703793 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f91dd8da-9cf7-4742-be2b-e1cd6c763e96-kube-api-access-h2l44" (OuterVolumeSpecName: "kube-api-access-h2l44") pod "f91dd8da-9cf7-4742-be2b-e1cd6c763e96" (UID: "f91dd8da-9cf7-4742-be2b-e1cd6c763e96"). InnerVolumeSpecName "kube-api-access-h2l44". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:16:00 crc kubenswrapper[4838]: I1128 11:16:00.797260 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h2l44\" (UniqueName: \"kubernetes.io/projected/f91dd8da-9cf7-4742-be2b-e1cd6c763e96-kube-api-access-h2l44\") on node \"crc\" DevicePath \"\"" Nov 28 11:16:01 crc kubenswrapper[4838]: I1128 11:16:01.455525 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="97085270257a3ec978ce44dc1961d78f137eedb41e805909c518e1fea615ebcc" Nov 28 11:16:01 crc kubenswrapper[4838]: I1128 11:16:01.455654 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-r69n8/crc-debug-zb922" Nov 28 11:16:01 crc kubenswrapper[4838]: I1128 11:16:01.861712 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-r69n8/crc-debug-lh42q"] Nov 28 11:16:01 crc kubenswrapper[4838]: E1128 11:16:01.862340 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f91dd8da-9cf7-4742-be2b-e1cd6c763e96" containerName="container-00" Nov 28 11:16:01 crc kubenswrapper[4838]: I1128 11:16:01.862355 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="f91dd8da-9cf7-4742-be2b-e1cd6c763e96" containerName="container-00" Nov 28 11:16:01 crc kubenswrapper[4838]: I1128 11:16:01.862597 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="f91dd8da-9cf7-4742-be2b-e1cd6c763e96" containerName="container-00" Nov 28 11:16:01 crc kubenswrapper[4838]: I1128 11:16:01.863307 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-r69n8/crc-debug-lh42q" Nov 28 11:16:01 crc kubenswrapper[4838]: I1128 11:16:01.920576 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/64a73469-6583-47ba-8da7-5b2344fb8e55-host\") pod \"crc-debug-lh42q\" (UID: \"64a73469-6583-47ba-8da7-5b2344fb8e55\") " pod="openshift-must-gather-r69n8/crc-debug-lh42q" Nov 28 11:16:01 crc kubenswrapper[4838]: I1128 11:16:01.920805 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-249gs\" (UniqueName: \"kubernetes.io/projected/64a73469-6583-47ba-8da7-5b2344fb8e55-kube-api-access-249gs\") pod \"crc-debug-lh42q\" (UID: \"64a73469-6583-47ba-8da7-5b2344fb8e55\") " pod="openshift-must-gather-r69n8/crc-debug-lh42q" Nov 28 11:16:02 crc kubenswrapper[4838]: I1128 11:16:02.022827 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/64a73469-6583-47ba-8da7-5b2344fb8e55-host\") pod \"crc-debug-lh42q\" (UID: \"64a73469-6583-47ba-8da7-5b2344fb8e55\") " pod="openshift-must-gather-r69n8/crc-debug-lh42q" Nov 28 11:16:02 crc kubenswrapper[4838]: I1128 11:16:02.022978 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-249gs\" (UniqueName: \"kubernetes.io/projected/64a73469-6583-47ba-8da7-5b2344fb8e55-kube-api-access-249gs\") pod \"crc-debug-lh42q\" (UID: \"64a73469-6583-47ba-8da7-5b2344fb8e55\") " pod="openshift-must-gather-r69n8/crc-debug-lh42q" Nov 28 11:16:02 crc kubenswrapper[4838]: I1128 11:16:02.022999 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/64a73469-6583-47ba-8da7-5b2344fb8e55-host\") pod \"crc-debug-lh42q\" (UID: \"64a73469-6583-47ba-8da7-5b2344fb8e55\") " 
pod="openshift-must-gather-r69n8/crc-debug-lh42q" Nov 28 11:16:02 crc kubenswrapper[4838]: I1128 11:16:02.062736 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-249gs\" (UniqueName: \"kubernetes.io/projected/64a73469-6583-47ba-8da7-5b2344fb8e55-kube-api-access-249gs\") pod \"crc-debug-lh42q\" (UID: \"64a73469-6583-47ba-8da7-5b2344fb8e55\") " pod="openshift-must-gather-r69n8/crc-debug-lh42q" Nov 28 11:16:02 crc kubenswrapper[4838]: I1128 11:16:02.186015 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-r69n8/crc-debug-lh42q" Nov 28 11:16:02 crc kubenswrapper[4838]: I1128 11:16:02.466084 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-r69n8/crc-debug-lh42q" event={"ID":"64a73469-6583-47ba-8da7-5b2344fb8e55","Type":"ContainerStarted","Data":"4320f5f2e08affb1f3b5bcfbbafae328b76208d5ce6df40d2b2aaff1fa855410"} Nov 28 11:16:02 crc kubenswrapper[4838]: I1128 11:16:02.466124 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-r69n8/crc-debug-lh42q" event={"ID":"64a73469-6583-47ba-8da7-5b2344fb8e55","Type":"ContainerStarted","Data":"a05c77e2b226c80c5a84888b8efd4c4912bd17ce00edaa8cc81b47d4dcee353c"} Nov 28 11:16:02 crc kubenswrapper[4838]: I1128 11:16:02.495625 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-r69n8/crc-debug-lh42q" podStartSLOduration=1.4956046299999999 podStartE2EDuration="1.49560463s" podCreationTimestamp="2025-11-28 11:16:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 11:16:02.48389349 +0000 UTC m=+4734.182867670" watchObservedRunningTime="2025-11-28 11:16:02.49560463 +0000 UTC m=+4734.194578800" Nov 28 11:16:02 crc kubenswrapper[4838]: I1128 11:16:02.577624 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f91dd8da-9cf7-4742-be2b-e1cd6c763e96" path="/var/lib/kubelet/pods/f91dd8da-9cf7-4742-be2b-e1cd6c763e96/volumes" Nov 28 11:16:02 crc kubenswrapper[4838]: I1128 11:16:02.799800 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kkzm5"] Nov 28 11:16:02 crc kubenswrapper[4838]: I1128 11:16:02.802110 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kkzm5" Nov 28 11:16:02 crc kubenswrapper[4838]: I1128 11:16:02.807901 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kkzm5"] Nov 28 11:16:02 crc kubenswrapper[4838]: I1128 11:16:02.840703 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s82xg\" (UniqueName: \"kubernetes.io/projected/4783327e-4a17-41ee-a140-ea36c00654b8-kube-api-access-s82xg\") pod \"redhat-marketplace-kkzm5\" (UID: \"4783327e-4a17-41ee-a140-ea36c00654b8\") " pod="openshift-marketplace/redhat-marketplace-kkzm5" Nov 28 11:16:02 crc kubenswrapper[4838]: I1128 11:16:02.840811 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4783327e-4a17-41ee-a140-ea36c00654b8-utilities\") pod \"redhat-marketplace-kkzm5\" (UID: \"4783327e-4a17-41ee-a140-ea36c00654b8\") " pod="openshift-marketplace/redhat-marketplace-kkzm5" Nov 28 11:16:02 crc kubenswrapper[4838]: I1128 11:16:02.840842 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4783327e-4a17-41ee-a140-ea36c00654b8-catalog-content\") pod \"redhat-marketplace-kkzm5\" (UID: \"4783327e-4a17-41ee-a140-ea36c00654b8\") " pod="openshift-marketplace/redhat-marketplace-kkzm5" Nov 28 11:16:02 crc kubenswrapper[4838]: I1128 11:16:02.942140 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4783327e-4a17-41ee-a140-ea36c00654b8-utilities\") pod \"redhat-marketplace-kkzm5\" (UID: \"4783327e-4a17-41ee-a140-ea36c00654b8\") " pod="openshift-marketplace/redhat-marketplace-kkzm5" Nov 28 11:16:02 crc kubenswrapper[4838]: I1128 11:16:02.942185 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4783327e-4a17-41ee-a140-ea36c00654b8-catalog-content\") pod \"redhat-marketplace-kkzm5\" (UID: \"4783327e-4a17-41ee-a140-ea36c00654b8\") " pod="openshift-marketplace/redhat-marketplace-kkzm5" Nov 28 11:16:02 crc kubenswrapper[4838]: I1128 11:16:02.942306 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s82xg\" (UniqueName: \"kubernetes.io/projected/4783327e-4a17-41ee-a140-ea36c00654b8-kube-api-access-s82xg\") pod \"redhat-marketplace-kkzm5\" (UID: \"4783327e-4a17-41ee-a140-ea36c00654b8\") " pod="openshift-marketplace/redhat-marketplace-kkzm5" Nov 28 11:16:02 crc kubenswrapper[4838]: I1128 11:16:02.942614 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4783327e-4a17-41ee-a140-ea36c00654b8-utilities\") pod \"redhat-marketplace-kkzm5\" (UID: \"4783327e-4a17-41ee-a140-ea36c00654b8\") " pod="openshift-marketplace/redhat-marketplace-kkzm5" Nov 28 11:16:02 crc kubenswrapper[4838]: I1128 11:16:02.942710 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4783327e-4a17-41ee-a140-ea36c00654b8-catalog-content\") pod \"redhat-marketplace-kkzm5\" (UID: \"4783327e-4a17-41ee-a140-ea36c00654b8\") " pod="openshift-marketplace/redhat-marketplace-kkzm5" Nov 28 11:16:02 crc kubenswrapper[4838]: I1128 11:16:02.971445 4838 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-s82xg\" (UniqueName: \"kubernetes.io/projected/4783327e-4a17-41ee-a140-ea36c00654b8-kube-api-access-s82xg\") pod \"redhat-marketplace-kkzm5\" (UID: \"4783327e-4a17-41ee-a140-ea36c00654b8\") " pod="openshift-marketplace/redhat-marketplace-kkzm5" Nov 28 11:16:03 crc kubenswrapper[4838]: I1128 11:16:03.156177 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kkzm5" Nov 28 11:16:03 crc kubenswrapper[4838]: I1128 11:16:03.485148 4838 generic.go:334] "Generic (PLEG): container finished" podID="64a73469-6583-47ba-8da7-5b2344fb8e55" containerID="4320f5f2e08affb1f3b5bcfbbafae328b76208d5ce6df40d2b2aaff1fa855410" exitCode=0 Nov 28 11:16:03 crc kubenswrapper[4838]: I1128 11:16:03.485449 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-r69n8/crc-debug-lh42q" event={"ID":"64a73469-6583-47ba-8da7-5b2344fb8e55","Type":"ContainerDied","Data":"4320f5f2e08affb1f3b5bcfbbafae328b76208d5ce6df40d2b2aaff1fa855410"} Nov 28 11:16:03 crc kubenswrapper[4838]: I1128 11:16:03.666161 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kkzm5"] Nov 28 11:16:04 crc kubenswrapper[4838]: I1128 11:16:04.499025 4838 generic.go:334] "Generic (PLEG): container finished" podID="4783327e-4a17-41ee-a140-ea36c00654b8" containerID="330b3086c0f665c984fbe5e2a4d22dd670f85e9c680a8cd9d41a8ccbb250bda4" exitCode=0 Nov 28 11:16:04 crc kubenswrapper[4838]: I1128 11:16:04.499208 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kkzm5" event={"ID":"4783327e-4a17-41ee-a140-ea36c00654b8","Type":"ContainerDied","Data":"330b3086c0f665c984fbe5e2a4d22dd670f85e9c680a8cd9d41a8ccbb250bda4"} Nov 28 11:16:04 crc kubenswrapper[4838]: I1128 11:16:04.499544 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kkzm5" event={"ID":"4783327e-4a17-41ee-a140-ea36c00654b8","Type":"ContainerStarted","Data":"518ce182fc33a19bd8d50c23ed33f4ee6ffc427620785b79d39bf397e21da91a"} Nov 28 11:16:04 crc kubenswrapper[4838]: I1128 11:16:04.507322 4838 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 11:16:04 crc kubenswrapper[4838]: I1128 11:16:04.586639 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-r69n8/crc-debug-lh42q" Nov 28 11:16:04 crc kubenswrapper[4838]: I1128 11:16:04.678565 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/64a73469-6583-47ba-8da7-5b2344fb8e55-host\") pod \"64a73469-6583-47ba-8da7-5b2344fb8e55\" (UID: \"64a73469-6583-47ba-8da7-5b2344fb8e55\") " Nov 28 11:16:04 crc kubenswrapper[4838]: I1128 11:16:04.678644 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249gs\" (UniqueName: \"kubernetes.io/projected/64a73469-6583-47ba-8da7-5b2344fb8e55-kube-api-access-249gs\") pod \"64a73469-6583-47ba-8da7-5b2344fb8e55\" (UID: \"64a73469-6583-47ba-8da7-5b2344fb8e55\") " Nov 28 11:16:04 crc kubenswrapper[4838]: I1128 11:16:04.678761 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/64a73469-6583-47ba-8da7-5b2344fb8e55-host" (OuterVolumeSpecName: "host") pod "64a73469-6583-47ba-8da7-5b2344fb8e55" (UID: "64a73469-6583-47ba-8da7-5b2344fb8e55"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:16:04 crc kubenswrapper[4838]: I1128 11:16:04.679846 4838 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/64a73469-6583-47ba-8da7-5b2344fb8e55-host\") on node \"crc\" DevicePath \"\"" Nov 28 11:16:04 crc kubenswrapper[4838]: I1128 11:16:04.691252 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64a73469-6583-47ba-8da7-5b2344fb8e55-kube-api-access-249gs" (OuterVolumeSpecName: "kube-api-access-249gs") pod "64a73469-6583-47ba-8da7-5b2344fb8e55" (UID: "64a73469-6583-47ba-8da7-5b2344fb8e55"). InnerVolumeSpecName "kube-api-access-249gs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:16:04 crc kubenswrapper[4838]: I1128 11:16:04.712886 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-r69n8/crc-debug-lh42q"] Nov 28 11:16:04 crc kubenswrapper[4838]: I1128 11:16:04.719964 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-r69n8/crc-debug-lh42q"] Nov 28 11:16:04 crc kubenswrapper[4838]: I1128 11:16:04.781538 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249gs\" (UniqueName: \"kubernetes.io/projected/64a73469-6583-47ba-8da7-5b2344fb8e55-kube-api-access-249gs\") on node \"crc\" DevicePath \"\"" Nov 28 11:16:05 crc kubenswrapper[4838]: I1128 11:16:05.510306 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a05c77e2b226c80c5a84888b8efd4c4912bd17ce00edaa8cc81b47d4dcee353c" Nov 28 11:16:05 crc kubenswrapper[4838]: I1128 11:16:05.510334 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-r69n8/crc-debug-lh42q" Nov 28 11:16:05 crc kubenswrapper[4838]: I1128 11:16:05.513508 4838 generic.go:334] "Generic (PLEG): container finished" podID="4783327e-4a17-41ee-a140-ea36c00654b8" containerID="685d1bae469344fbd87dac58931b80add3ef11a2ece467ef28ff7e06bad8f813" exitCode=0 Nov 28 11:16:05 crc kubenswrapper[4838]: I1128 11:16:05.513576 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kkzm5" event={"ID":"4783327e-4a17-41ee-a140-ea36c00654b8","Type":"ContainerDied","Data":"685d1bae469344fbd87dac58931b80add3ef11a2ece467ef28ff7e06bad8f813"} Nov 28 11:16:05 crc kubenswrapper[4838]: I1128 11:16:05.917742 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-r69n8/crc-debug-ts6km"] Nov 28 11:16:05 crc kubenswrapper[4838]: E1128 11:16:05.918120 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64a73469-6583-47ba-8da7-5b2344fb8e55" containerName="container-00" Nov 28 11:16:05 crc kubenswrapper[4838]: I1128 11:16:05.918137 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="64a73469-6583-47ba-8da7-5b2344fb8e55" containerName="container-00" Nov 28 11:16:05 crc kubenswrapper[4838]: I1128 11:16:05.918331 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="64a73469-6583-47ba-8da7-5b2344fb8e55" containerName="container-00" Nov 28 11:16:05 crc kubenswrapper[4838]: I1128 11:16:05.918978 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-r69n8/crc-debug-ts6km" Nov 28 11:16:06 crc kubenswrapper[4838]: I1128 11:16:06.004945 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0ef83e4f-0be2-454f-b81e-46d679fcd529-host\") pod \"crc-debug-ts6km\" (UID: \"0ef83e4f-0be2-454f-b81e-46d679fcd529\") " pod="openshift-must-gather-r69n8/crc-debug-ts6km" Nov 28 11:16:06 crc kubenswrapper[4838]: I1128 11:16:06.005228 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zn57q\" (UniqueName: \"kubernetes.io/projected/0ef83e4f-0be2-454f-b81e-46d679fcd529-kube-api-access-zn57q\") pod \"crc-debug-ts6km\" (UID: \"0ef83e4f-0be2-454f-b81e-46d679fcd529\") " pod="openshift-must-gather-r69n8/crc-debug-ts6km" Nov 28 11:16:06 crc kubenswrapper[4838]: I1128 11:16:06.107082 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0ef83e4f-0be2-454f-b81e-46d679fcd529-host\") pod \"crc-debug-ts6km\" (UID: \"0ef83e4f-0be2-454f-b81e-46d679fcd529\") " pod="openshift-must-gather-r69n8/crc-debug-ts6km" Nov 28 11:16:06 crc kubenswrapper[4838]: I1128 11:16:06.107148 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zn57q\" (UniqueName: \"kubernetes.io/projected/0ef83e4f-0be2-454f-b81e-46d679fcd529-kube-api-access-zn57q\") pod \"crc-debug-ts6km\" (UID: \"0ef83e4f-0be2-454f-b81e-46d679fcd529\") " pod="openshift-must-gather-r69n8/crc-debug-ts6km" Nov 28 11:16:06 crc kubenswrapper[4838]: I1128 11:16:06.107474 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0ef83e4f-0be2-454f-b81e-46d679fcd529-host\") pod \"crc-debug-ts6km\" (UID: \"0ef83e4f-0be2-454f-b81e-46d679fcd529\") " pod="openshift-must-gather-r69n8/crc-debug-ts6km" Nov 28 11:16:06 crc kubenswrapper[4838]: I1128 11:16:06.132619 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zn57q\" (UniqueName: \"kubernetes.io/projected/0ef83e4f-0be2-454f-b81e-46d679fcd529-kube-api-access-zn57q\") pod \"crc-debug-ts6km\" (UID: \"0ef83e4f-0be2-454f-b81e-46d679fcd529\") " pod="openshift-must-gather-r69n8/crc-debug-ts6km" Nov 28 11:16:06 crc kubenswrapper[4838]: I1128 11:16:06.235732 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-r69n8/crc-debug-ts6km" Nov 28 11:16:06 crc kubenswrapper[4838]: W1128 11:16:06.262013 4838 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0ef83e4f_0be2_454f_b81e_46d679fcd529.slice/crio-393b03f4d3884c52700534792f9dd964c97d8f6819c8c7f24d6221227bffe07c WatchSource:0}: Error finding container 393b03f4d3884c52700534792f9dd964c97d8f6819c8c7f24d6221227bffe07c: Status 404 returned error can't find the container with id 393b03f4d3884c52700534792f9dd964c97d8f6819c8c7f24d6221227bffe07c Nov 28 11:16:06 crc kubenswrapper[4838]: I1128 11:16:06.523782 4838 generic.go:334] "Generic (PLEG): container finished" podID="0ef83e4f-0be2-454f-b81e-46d679fcd529" containerID="59372b49860ff5ffac5405903dfe76020550208b00feb28f42dff4b599ae2c19" exitCode=0 Nov 28 11:16:06 crc kubenswrapper[4838]: I1128 11:16:06.523857 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-r69n8/crc-debug-ts6km" event={"ID":"0ef83e4f-0be2-454f-b81e-46d679fcd529","Type":"ContainerDied","Data":"59372b49860ff5ffac5405903dfe76020550208b00feb28f42dff4b599ae2c19"} Nov 28 11:16:06 crc kubenswrapper[4838]: I1128 11:16:06.524211 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-r69n8/crc-debug-ts6km" event={"ID":"0ef83e4f-0be2-454f-b81e-46d679fcd529","Type":"ContainerStarted","Data":"393b03f4d3884c52700534792f9dd964c97d8f6819c8c7f24d6221227bffe07c"} Nov 28 11:16:06 crc kubenswrapper[4838]: I1128 11:16:06.530538 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kkzm5" event={"ID":"4783327e-4a17-41ee-a140-ea36c00654b8","Type":"ContainerStarted","Data":"8c3b3a268401df208865741170050d40f3514c0c933be22eeaee142ff9f7b3f8"} Nov 28 11:16:06 crc kubenswrapper[4838]: I1128 11:16:06.567026 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kkzm5" podStartSLOduration=2.947138963 podStartE2EDuration="4.56700482s" podCreationTimestamp="2025-11-28 11:16:02 +0000 UTC" firstStartedPulling="2025-11-28 11:16:04.505092631 +0000 UTC m=+4736.204066801" lastFinishedPulling="2025-11-28 11:16:06.124958488 +0000 UTC m=+4737.823932658" observedRunningTime="2025-11-28 11:16:06.557673136 +0000 UTC m=+4738.256647336" watchObservedRunningTime="2025-11-28 11:16:06.56700482 +0000 UTC m=+4738.265979000" Nov 28 11:16:06 crc kubenswrapper[4838]: I1128 11:16:06.573527 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64a73469-6583-47ba-8da7-5b2344fb8e55" path="/var/lib/kubelet/pods/64a73469-6583-47ba-8da7-5b2344fb8e55/volumes" Nov 28 11:16:06 crc kubenswrapper[4838]: I1128 11:16:06.592689 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-r69n8/crc-debug-ts6km"] Nov 28 11:16:06 crc kubenswrapper[4838]: I1128 11:16:06.601335 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-r69n8/crc-debug-ts6km"] Nov 28 11:16:07 crc kubenswrapper[4838]: I1128 11:16:07.642740 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-r69n8/crc-debug-ts6km" Nov 28 11:16:07 crc kubenswrapper[4838]: I1128 11:16:07.736662 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zn57q\" (UniqueName: \"kubernetes.io/projected/0ef83e4f-0be2-454f-b81e-46d679fcd529-kube-api-access-zn57q\") pod \"0ef83e4f-0be2-454f-b81e-46d679fcd529\" (UID: \"0ef83e4f-0be2-454f-b81e-46d679fcd529\") " Nov 28 11:16:07 crc kubenswrapper[4838]: I1128 11:16:07.736972 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0ef83e4f-0be2-454f-b81e-46d679fcd529-host\") pod \"0ef83e4f-0be2-454f-b81e-46d679fcd529\" (UID: \"0ef83e4f-0be2-454f-b81e-46d679fcd529\") " Nov 28 11:16:07 crc kubenswrapper[4838]: I1128 11:16:07.737072 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0ef83e4f-0be2-454f-b81e-46d679fcd529-host" (OuterVolumeSpecName: "host") pod "0ef83e4f-0be2-454f-b81e-46d679fcd529" (UID: "0ef83e4f-0be2-454f-b81e-46d679fcd529"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 11:16:07 crc kubenswrapper[4838]: I1128 11:16:07.737605 4838 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0ef83e4f-0be2-454f-b81e-46d679fcd529-host\") on node \"crc\" DevicePath \"\"" Nov 28 11:16:07 crc kubenswrapper[4838]: I1128 11:16:07.744145 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ef83e4f-0be2-454f-b81e-46d679fcd529-kube-api-access-zn57q" (OuterVolumeSpecName: "kube-api-access-zn57q") pod "0ef83e4f-0be2-454f-b81e-46d679fcd529" (UID: "0ef83e4f-0be2-454f-b81e-46d679fcd529"). InnerVolumeSpecName "kube-api-access-zn57q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:16:07 crc kubenswrapper[4838]: I1128 11:16:07.840061 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zn57q\" (UniqueName: \"kubernetes.io/projected/0ef83e4f-0be2-454f-b81e-46d679fcd529-kube-api-access-zn57q\") on node \"crc\" DevicePath \"\"" Nov 28 11:16:08 crc kubenswrapper[4838]: I1128 11:16:08.552020 4838 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="393b03f4d3884c52700534792f9dd964c97d8f6819c8c7f24d6221227bffe07c" Nov 28 11:16:08 crc kubenswrapper[4838]: I1128 11:16:08.552080 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-r69n8/crc-debug-ts6km" Nov 28 11:16:08 crc kubenswrapper[4838]: I1128 11:16:08.573178 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e" Nov 28 11:16:08 crc kubenswrapper[4838]: E1128 11:16:08.573446 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:16:08 crc kubenswrapper[4838]: I1128 11:16:08.575612 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ef83e4f-0be2-454f-b81e-46d679fcd529" path="/var/lib/kubelet/pods/0ef83e4f-0be2-454f-b81e-46d679fcd529/volumes" Nov 28 11:16:09 crc kubenswrapper[4838]: E1128 11:16:09.073193 4838 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0ef83e4f_0be2_454f_b81e_46d679fcd529.slice\": RecentStats: unable to find data in memory cache]" Nov 28 11:16:13 crc kubenswrapper[4838]: I1128 11:16:13.156639 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kkzm5" Nov 28 11:16:13 crc kubenswrapper[4838]: I1128 11:16:13.157178 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kkzm5" Nov 28 11:16:13 crc kubenswrapper[4838]: I1128 11:16:13.219132 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kkzm5" Nov 28 11:16:13 crc kubenswrapper[4838]: I1128 11:16:13.670926 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-kkzm5" Nov 28 11:16:13 crc kubenswrapper[4838]: I1128 11:16:13.720163 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kkzm5"] Nov 28 11:16:15 crc kubenswrapper[4838]: I1128 11:16:15.618630 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-kkzm5" podUID="4783327e-4a17-41ee-a140-ea36c00654b8" containerName="registry-server" containerID="cri-o://8c3b3a268401df208865741170050d40f3514c0c933be22eeaee142ff9f7b3f8" gracePeriod=2 Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.137958 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kkzm5" Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.205082 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s82xg\" (UniqueName: \"kubernetes.io/projected/4783327e-4a17-41ee-a140-ea36c00654b8-kube-api-access-s82xg\") pod \"4783327e-4a17-41ee-a140-ea36c00654b8\" (UID: \"4783327e-4a17-41ee-a140-ea36c00654b8\") " Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.205189 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4783327e-4a17-41ee-a140-ea36c00654b8-catalog-content\") pod \"4783327e-4a17-41ee-a140-ea36c00654b8\" (UID: \"4783327e-4a17-41ee-a140-ea36c00654b8\") " Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.205514 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4783327e-4a17-41ee-a140-ea36c00654b8-utilities\") pod \"4783327e-4a17-41ee-a140-ea36c00654b8\" (UID: \"4783327e-4a17-41ee-a140-ea36c00654b8\") " Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.206509 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4783327e-4a17-41ee-a140-ea36c00654b8-utilities" (OuterVolumeSpecName: "utilities") pod "4783327e-4a17-41ee-a140-ea36c00654b8" (UID: "4783327e-4a17-41ee-a140-ea36c00654b8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.210931 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4783327e-4a17-41ee-a140-ea36c00654b8-kube-api-access-s82xg" (OuterVolumeSpecName: "kube-api-access-s82xg") pod "4783327e-4a17-41ee-a140-ea36c00654b8" (UID: "4783327e-4a17-41ee-a140-ea36c00654b8"). InnerVolumeSpecName "kube-api-access-s82xg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.223029 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4783327e-4a17-41ee-a140-ea36c00654b8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4783327e-4a17-41ee-a140-ea36c00654b8" (UID: "4783327e-4a17-41ee-a140-ea36c00654b8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.307908 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4783327e-4a17-41ee-a140-ea36c00654b8-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.308398 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s82xg\" (UniqueName: \"kubernetes.io/projected/4783327e-4a17-41ee-a140-ea36c00654b8-kube-api-access-s82xg\") on node \"crc\" DevicePath \"\"" Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.308424 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4783327e-4a17-41ee-a140-ea36c00654b8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.639169 4838 generic.go:334] "Generic (PLEG): container finished" podID="4783327e-4a17-41ee-a140-ea36c00654b8" containerID="8c3b3a268401df208865741170050d40f3514c0c933be22eeaee142ff9f7b3f8" exitCode=0 Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.639234 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kkzm5" event={"ID":"4783327e-4a17-41ee-a140-ea36c00654b8","Type":"ContainerDied","Data":"8c3b3a268401df208865741170050d40f3514c0c933be22eeaee142ff9f7b3f8"} Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.639274 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kkzm5" event={"ID":"4783327e-4a17-41ee-a140-ea36c00654b8","Type":"ContainerDied","Data":"518ce182fc33a19bd8d50c23ed33f4ee6ffc427620785b79d39bf397e21da91a"} Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.639304 4838 scope.go:117] "RemoveContainer" containerID="8c3b3a268401df208865741170050d40f3514c0c933be22eeaee142ff9f7b3f8" Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.639489 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kkzm5" Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.677062 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kkzm5"] Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.692437 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-kkzm5"] Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.695829 4838 scope.go:117] "RemoveContainer" containerID="685d1bae469344fbd87dac58931b80add3ef11a2ece467ef28ff7e06bad8f813" Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.723923 4838 scope.go:117] "RemoveContainer" containerID="330b3086c0f665c984fbe5e2a4d22dd670f85e9c680a8cd9d41a8ccbb250bda4" Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.774046 4838 scope.go:117] "RemoveContainer" containerID="8c3b3a268401df208865741170050d40f3514c0c933be22eeaee142ff9f7b3f8" Nov 28 11:16:16 crc kubenswrapper[4838]: E1128 11:16:16.774768 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c3b3a268401df208865741170050d40f3514c0c933be22eeaee142ff9f7b3f8\": container with ID starting with 8c3b3a268401df208865741170050d40f3514c0c933be22eeaee142ff9f7b3f8 not found: ID does not exist" containerID="8c3b3a268401df208865741170050d40f3514c0c933be22eeaee142ff9f7b3f8" Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.774814 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c3b3a268401df208865741170050d40f3514c0c933be22eeaee142ff9f7b3f8"} err="failed to get container status \"8c3b3a268401df208865741170050d40f3514c0c933be22eeaee142ff9f7b3f8\": rpc error: code = NotFound desc = could not find container \"8c3b3a268401df208865741170050d40f3514c0c933be22eeaee142ff9f7b3f8\": container with ID starting with 8c3b3a268401df208865741170050d40f3514c0c933be22eeaee142ff9f7b3f8 not found: ID does not exist" Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.774843 4838 scope.go:117] "RemoveContainer" containerID="685d1bae469344fbd87dac58931b80add3ef11a2ece467ef28ff7e06bad8f813" Nov 28 11:16:16 crc kubenswrapper[4838]: E1128 11:16:16.775496 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"685d1bae469344fbd87dac58931b80add3ef11a2ece467ef28ff7e06bad8f813\": container with ID starting with 685d1bae469344fbd87dac58931b80add3ef11a2ece467ef28ff7e06bad8f813 not found: ID does not exist" containerID="685d1bae469344fbd87dac58931b80add3ef11a2ece467ef28ff7e06bad8f813" Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.776742 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"685d1bae469344fbd87dac58931b80add3ef11a2ece467ef28ff7e06bad8f813"} err="failed to get container status \"685d1bae469344fbd87dac58931b80add3ef11a2ece467ef28ff7e06bad8f813\": rpc error: code = NotFound desc = could not find container \"685d1bae469344fbd87dac58931b80add3ef11a2ece467ef28ff7e06bad8f813\": container with ID starting with 685d1bae469344fbd87dac58931b80add3ef11a2ece467ef28ff7e06bad8f813 not found: ID does not exist" Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.776763 4838 scope.go:117] "RemoveContainer" containerID="330b3086c0f665c984fbe5e2a4d22dd670f85e9c680a8cd9d41a8ccbb250bda4" Nov 28 11:16:16 crc kubenswrapper[4838]: E1128 11:16:16.777214 4838 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"330b3086c0f665c984fbe5e2a4d22dd670f85e9c680a8cd9d41a8ccbb250bda4\": container with ID starting with 330b3086c0f665c984fbe5e2a4d22dd670f85e9c680a8cd9d41a8ccbb250bda4 not found: ID does not exist" containerID="330b3086c0f665c984fbe5e2a4d22dd670f85e9c680a8cd9d41a8ccbb250bda4" Nov 28 11:16:16 crc kubenswrapper[4838]: I1128 11:16:16.777248 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"330b3086c0f665c984fbe5e2a4d22dd670f85e9c680a8cd9d41a8ccbb250bda4"} err="failed to get container status \"330b3086c0f665c984fbe5e2a4d22dd670f85e9c680a8cd9d41a8ccbb250bda4\": rpc error: code = NotFound desc = could not find container \"330b3086c0f665c984fbe5e2a4d22dd670f85e9c680a8cd9d41a8ccbb250bda4\": container with ID starting with 330b3086c0f665c984fbe5e2a4d22dd670f85e9c680a8cd9d41a8ccbb250bda4 not found: ID does not exist" Nov 28 11:16:18 crc kubenswrapper[4838]: I1128 11:16:18.578906 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4783327e-4a17-41ee-a140-ea36c00654b8" path="/var/lib/kubelet/pods/4783327e-4a17-41ee-a140-ea36c00654b8/volumes" Nov 28 11:16:19 crc kubenswrapper[4838]: E1128 11:16:19.385388 4838 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0ef83e4f_0be2_454f_b81e_46d679fcd529.slice\": RecentStats: unable to find data in memory cache]" Nov 28 11:16:20 crc kubenswrapper[4838]: I1128 11:16:20.563107 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e" Nov 28 11:16:20 crc kubenswrapper[4838]: E1128 11:16:20.564003 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:16:29 crc kubenswrapper[4838]: E1128 11:16:29.631498 4838 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0ef83e4f_0be2_454f_b81e_46d679fcd529.slice\": RecentStats: unable to find data in memory cache]" Nov 28 11:16:33 crc kubenswrapper[4838]: I1128 11:16:33.564992 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e" Nov 28 11:16:33 crc kubenswrapper[4838]: E1128 11:16:33.567501 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:16:39 crc kubenswrapper[4838]: E1128 11:16:39.894874 4838 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0ef83e4f_0be2_454f_b81e_46d679fcd529.slice\": RecentStats: unable to find data in memory cache]" 
Nov 28 11:16:47 crc kubenswrapper[4838]: I1128 11:16:47.562946 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e"
Nov 28 11:16:47 crc kubenswrapper[4838]: E1128 11:16:47.563574 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 11:16:50 crc kubenswrapper[4838]: E1128 11:16:50.207547 4838 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0ef83e4f_0be2_454f_b81e_46d679fcd529.slice\": RecentStats: unable to find data in memory cache]"
Nov 28 11:16:53 crc kubenswrapper[4838]: I1128 11:16:53.712271 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6dfcbd5794-mx784_54957e0e-0d82-418e-9786-612dd3d121f0/barbican-api/0.log"
Nov 28 11:16:53 crc kubenswrapper[4838]: I1128 11:16:53.878515 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6dfcbd5794-mx784_54957e0e-0d82-418e-9786-612dd3d121f0/barbican-api-log/0.log"
Nov 28 11:16:53 crc kubenswrapper[4838]: I1128 11:16:53.934254 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-65687b7854-r7rh5_de6a5f4a-30c6-4f42-88e7-3f113c1ed53b/barbican-keystone-listener/0.log"
Nov 28 11:16:54 crc kubenswrapper[4838]: I1128 11:16:54.182126 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6669b75dd9-q6nlg_39e38efd-bd92-419d-90e8-f6630032e7d7/barbican-worker/0.log"
Nov 28 11:16:54 crc kubenswrapper[4838]: I1128 11:16:54.203288 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6669b75dd9-q6nlg_39e38efd-bd92-419d-90e8-f6630032e7d7/barbican-worker-log/0.log"
Nov 28 11:16:54 crc kubenswrapper[4838]: I1128 11:16:54.206883 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-65687b7854-r7rh5_de6a5f4a-30c6-4f42-88e7-3f113c1ed53b/barbican-keystone-listener-log/0.log"
Nov 28 11:16:54 crc kubenswrapper[4838]: I1128 11:16:54.405695 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-lbwtr_226a6e3a-8fcf-4284-b8a5-3f4055ae9838/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 28 11:16:54 crc kubenswrapper[4838]: I1128 11:16:54.424779 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_afc12091-3d32-4b69-8e6c-29d521764b7c/ceilometer-central-agent/0.log"
Nov 28 11:16:54 crc kubenswrapper[4838]: I1128 11:16:54.581382 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_afc12091-3d32-4b69-8e6c-29d521764b7c/ceilometer-notification-agent/0.log"
Nov 28 11:16:54 crc kubenswrapper[4838]: I1128 11:16:54.592208 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_afc12091-3d32-4b69-8e6c-29d521764b7c/proxy-httpd/0.log"
Nov 28 11:16:54 crc kubenswrapper[4838]: I1128 11:16:54.619820 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_afc12091-3d32-4b69-8e6c-29d521764b7c/sg-core/0.log"
Nov 28 11:16:54 crc kubenswrapper[4838]: I1128 11:16:54.776568 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-client-edpm-deployment-openstack-edpm-ipam-npvhp_6ce03fb4-60ff-4aff-a42d-ad1e083b1d3a/ceph-client-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 28 11:16:54 crc kubenswrapper[4838]: I1128 11:16:54.806940 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-l5xjj_882b8dc3-77a7-42d9-a380-c9e27ff2a3e0/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 28 11:16:54 crc kubenswrapper[4838]: I1128 11:16:54.975100 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_e94e5f12-61ab-40e8-97ce-dc6f3c706583/cinder-api-log/0.log"
Nov 28 11:16:55 crc kubenswrapper[4838]: I1128 11:16:55.036118 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_e94e5f12-61ab-40e8-97ce-dc6f3c706583/cinder-api/0.log"
Nov 28 11:16:55 crc kubenswrapper[4838]: I1128 11:16:55.333789 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_8525f0f2-643f-4177-a4f8-12ca22b43363/cinder-backup/0.log"
Nov 28 11:16:55 crc kubenswrapper[4838]: I1128 11:16:55.821085 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_8525f0f2-643f-4177-a4f8-12ca22b43363/probe/0.log"
Nov 28 11:16:55 crc kubenswrapper[4838]: I1128 11:16:55.835919 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_289f7c5f-5d1c-44fa-9231-281ed2d83e7a/cinder-scheduler/0.log"
Nov 28 11:16:55 crc kubenswrapper[4838]: I1128 11:16:55.841987 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_289f7c5f-5d1c-44fa-9231-281ed2d83e7a/probe/0.log"
Nov 28 11:16:56 crc kubenswrapper[4838]: I1128 11:16:56.027485 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_5efcb033-775b-46d6-8c77-2bafc360c749/probe/0.log"
Nov 28 11:16:56 crc kubenswrapper[4838]: I1128 11:16:56.101964 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_5efcb033-775b-46d6-8c77-2bafc360c749/cinder-volume/0.log"
Nov 28 11:16:56 crc kubenswrapper[4838]: I1128 11:16:56.227512 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-b8l59_c2c8e67b-9151-44fa-b8f3-f86621d4fd67/configure-network-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 28 11:16:56 crc kubenswrapper[4838]: I1128 11:16:56.293703 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-tb7vp_345f52bd-a4c3-4f71-bd23-9141bc780bfb/configure-os-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 28 11:16:56 crc kubenswrapper[4838]: I1128 11:16:56.421643 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-76b5fdb995-4hbv2_05ad03f2-cfab-4825-9740-5c405550e376/init/0.log"
Nov 28 11:16:56 crc kubenswrapper[4838]: I1128 11:16:56.595968 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-76b5fdb995-4hbv2_05ad03f2-cfab-4825-9740-5c405550e376/init/0.log"
Nov 28 11:16:56 crc kubenswrapper[4838]: I1128 11:16:56.625728 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_65afdfae-6cab-4f19-9c41-49b9409a7352/glance-httpd/0.log"
Nov 28 11:16:56 crc kubenswrapper[4838]: I1128 11:16:56.630175 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-76b5fdb995-4hbv2_05ad03f2-cfab-4825-9740-5c405550e376/dnsmasq-dns/0.log"
Nov 28 11:16:56 crc kubenswrapper[4838]: I1128 11:16:56.813093 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_65afdfae-6cab-4f19-9c41-49b9409a7352/glance-log/0.log"
Nov 28 11:16:56 crc kubenswrapper[4838]: I1128 11:16:56.818285 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_89b4bc38-621a-4f06-acb9-a59089d304c1/glance-httpd/0.log"
Nov 28 11:16:56 crc kubenswrapper[4838]: I1128 11:16:56.879131 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_89b4bc38-621a-4f06-acb9-a59089d304c1/glance-log/0.log"
Nov 28 11:16:57 crc kubenswrapper[4838]: I1128 11:16:57.114278 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-bbz46_145f5aa8-896b-4b3c-846d-e896d932097d/install-certs-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 28 11:16:57 crc kubenswrapper[4838]: I1128 11:16:57.124210 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-77d65cd94d-8f62l_97cbb2f0-d45e-4b75-ad50-becba9e4db9b/horizon/0.log"
Nov 28 11:16:57 crc kubenswrapper[4838]: I1128 11:16:57.278543 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-77d65cd94d-8f62l_97cbb2f0-d45e-4b75-ad50-becba9e4db9b/horizon-log/0.log"
Nov 28 11:16:57 crc kubenswrapper[4838]: I1128 11:16:57.825788 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-ht9vn_02540331-1ba6-45ee-824c-52e9b076f511/install-os-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 28 11:16:58 crc kubenswrapper[4838]: I1128 11:16:58.032653 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29405461-fclpk_b9119237-dcca-4d01-b6e2-6deddc18f8f6/keystone-cron/0.log"
Nov 28 11:16:58 crc kubenswrapper[4838]: I1128 11:16:58.219040 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_9d3e1aba-11d2-478c-9715-49ba175c7b03/kube-state-metrics/0.log"
Nov 28 11:16:58 crc kubenswrapper[4838]: I1128 11:16:58.277302 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-8958p_d861b650-a017-43fc-8da3-b65d8f9e8ce8/libvirt-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 28 11:16:58 crc kubenswrapper[4838]: I1128 11:16:58.571573 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e"
Nov 28 11:16:58 crc kubenswrapper[4838]: E1128 11:16:58.571823 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 11:16:58 crc kubenswrapper[4838]: I1128 11:16:58.755761 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_9b49fbfc-a8f5-48aa-bb7b-96d82967eecb/probe/0.log"
Nov 28 11:16:58 crc kubenswrapper[4838]: I1128 11:16:58.884416 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-698bf66db7-q4nv6_4d77c8cd-d0c5-4bb9-84a3-e3a00f7c9a99/keystone-api/0.log"
Nov 28 11:16:58 crc kubenswrapper[4838]: I1128 11:16:58.903416 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_9b49fbfc-a8f5-48aa-bb7b-96d82967eecb/manila-scheduler/0.log"
Nov 28 11:16:58 crc kubenswrapper[4838]: I1128 11:16:58.972153 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_a16e601a-3619-49a9-82d1-67129c2e2413/manila-api/0.log"
Nov 28 11:16:59 crc kubenswrapper[4838]: I1128 11:16:59.224092 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_a16e601a-3619-49a9-82d1-67129c2e2413/manila-api-log/0.log"
Nov 28 11:16:59 crc kubenswrapper[4838]: I1128 11:16:59.389405 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_4697ec9a-896b-4703-87c0-84a7741b8724/probe/0.log"
Nov 28 11:16:59 crc kubenswrapper[4838]: I1128 11:16:59.578777 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_4697ec9a-896b-4703-87c0-84a7741b8724/manila-share/0.log"
Nov 28 11:16:59 crc kubenswrapper[4838]: I1128 11:16:59.668454 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-69bc8cb85-2qbr6_5f9ddfd3-3f45-40e8-a9f8-2976dd20280f/neutron-httpd/0.log"
Nov 28 11:16:59 crc kubenswrapper[4838]: I1128 11:16:59.810213 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-nnxmj_777a7bbd-ba32-4b20-a263-de82be50d3b1/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 28 11:16:59 crc kubenswrapper[4838]: I1128 11:16:59.813081 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-69bc8cb85-2qbr6_5f9ddfd3-3f45-40e8-a9f8-2976dd20280f/neutron-api/0.log"
Nov 28 11:17:00 crc kubenswrapper[4838]: I1128 11:17:00.204484 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_3305f5c4-7a09-439f-bf0f-534b3dea0b05/nova-api-log/0.log"
Nov 28 11:17:00 crc kubenswrapper[4838]: I1128 11:17:00.363780 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_71625902-2a8a-4e8b-beb2-faaee7714ed2/nova-cell0-conductor-conductor/0.log"
Nov 28 11:17:00 crc kubenswrapper[4838]: E1128 11:17:00.414545 4838 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0ef83e4f_0be2_454f_b81e_46d679fcd529.slice\": RecentStats: unable to find data in memory cache]"
Nov 28 11:17:00 crc kubenswrapper[4838]: I1128 11:17:00.604939 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_88e49157-0dd0-455d-a9bd-5a13c3d95087/nova-cell1-conductor-conductor/0.log"
Nov 28 11:17:00 crc kubenswrapper[4838]: I1128 11:17:00.633736 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_3305f5c4-7a09-439f-bf0f-534b3dea0b05/nova-api-api/0.log"
Nov 28 11:17:00 crc kubenswrapper[4838]: I1128 11:17:00.701293 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_71d3fada-848c-4e73-ad9e-f63e8fdde48e/nova-cell1-novncproxy-novncproxy/0.log"
Nov 28 11:17:00 crc kubenswrapper[4838]: I1128 11:17:00.868433 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-cv52x_54e4c0ee-74da-434c-bb61-702d4e78c663/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 28 11:17:01 crc kubenswrapper[4838]: I1128 11:17:01.088271 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_a524de62-c36c-4abf-9a45-57247679c4e7/nova-metadata-log/0.log"
Nov 28 11:17:01 crc kubenswrapper[4838]: I1128 11:17:01.255419 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_061baebe-5a1a-4090-a396-84571f88b105/mysql-bootstrap/0.log"
Nov 28 11:17:01 crc kubenswrapper[4838]: I1128 11:17:01.297708 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_3a05bed3-8b30-4959-9ee2-4b25a928b0e5/nova-scheduler-scheduler/0.log"
Nov 28 11:17:01 crc kubenswrapper[4838]: I1128 11:17:01.537807 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_061baebe-5a1a-4090-a396-84571f88b105/galera/0.log"
Nov 28 11:17:01 crc kubenswrapper[4838]: I1128 11:17:01.591586 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_061baebe-5a1a-4090-a396-84571f88b105/mysql-bootstrap/0.log"
Nov 28 11:17:01 crc kubenswrapper[4838]: I1128 11:17:01.797129 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_55cfa883-d16d-4231-95e7-fd0b3ad9b702/mysql-bootstrap/0.log"
Nov 28 11:17:01 crc kubenswrapper[4838]: I1128 11:17:01.955253 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_55cfa883-d16d-4231-95e7-fd0b3ad9b702/mysql-bootstrap/0.log"
Nov 28 11:17:01 crc kubenswrapper[4838]: I1128 11:17:01.971613 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_55cfa883-d16d-4231-95e7-fd0b3ad9b702/galera/0.log"
Nov 28 11:17:02 crc kubenswrapper[4838]: I1128 11:17:02.163056 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_b87ad3f7-9446-43b7-9141-2279794386a0/openstackclient/0.log"
Nov 28 11:17:02 crc kubenswrapper[4838]: I1128 11:17:02.236822 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-dqjd8_50cdff0a-cfe5-41e1-8eed-67b23079335f/ovn-controller/0.log"
Nov 28 11:17:02 crc kubenswrapper[4838]: I1128 11:17:02.447137 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-b7fh7_77ac18d8-c660-4742-8367-281a06a82e37/openstack-network-exporter/0.log"
Nov 28 11:17:02 crc kubenswrapper[4838]: I1128 11:17:02.591473 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-zt4fz_4cb6513a-e07a-40b9-a3ad-f147b8b4a96d/ovsdb-server-init/0.log"
Nov 28 11:17:02 crc kubenswrapper[4838]: I1128 11:17:02.756792 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_a524de62-c36c-4abf-9a45-57247679c4e7/nova-metadata-metadata/0.log"
Nov 28 11:17:02 crc kubenswrapper[4838]: I1128 11:17:02.801102 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-zt4fz_4cb6513a-e07a-40b9-a3ad-f147b8b4a96d/ovs-vswitchd/0.log"
Nov 28 11:17:02 crc kubenswrapper[4838]: I1128 11:17:02.815442 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-zt4fz_4cb6513a-e07a-40b9-a3ad-f147b8b4a96d/ovsdb-server/0.log"
Nov 28 11:17:02 crc kubenswrapper[4838]: I1128 11:17:02.818417 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-zt4fz_4cb6513a-e07a-40b9-a3ad-f147b8b4a96d/ovsdb-server-init/0.log"
Nov 28 11:17:03 crc kubenswrapper[4838]: I1128 11:17:03.038697 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_d62996a9-1816-49c7-9280-f115770a83ad/openstack-network-exporter/0.log"
Nov 28 11:17:03 crc kubenswrapper[4838]: I1128 11:17:03.075450 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-49cxw_e2bfe003-bf8c-444c-97ea-57a3b0a1c4ae/ovn-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 28 11:17:03 crc kubenswrapper[4838]: I1128 11:17:03.182874 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_d62996a9-1816-49c7-9280-f115770a83ad/ovn-northd/0.log"
Nov 28 11:17:03 crc kubenswrapper[4838]: I1128 11:17:03.255501 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_20476ab2-8070-42b3-a05c-d2c07c111ea9/openstack-network-exporter/0.log"
Nov 28 11:17:03 crc kubenswrapper[4838]: I1128 11:17:03.387582 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_20476ab2-8070-42b3-a05c-d2c07c111ea9/ovsdbserver-nb/0.log"
Nov 28 11:17:03 crc kubenswrapper[4838]: I1128 11:17:03.462951 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_c2037801-5d52-402d-9d8c-4b17928fb33a/openstack-network-exporter/0.log"
Nov 28 11:17:03 crc kubenswrapper[4838]: I1128 11:17:03.502684 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_c2037801-5d52-402d-9d8c-4b17928fb33a/ovsdbserver-sb/0.log"
Nov 28 11:17:03 crc kubenswrapper[4838]: I1128 11:17:03.775096 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-7f5f5f8b64-f2wff_f4ad309b-1078-40e9-abd7-d1b476971fce/placement-api/0.log"
Nov 28 11:17:03 crc kubenswrapper[4838]: I1128 11:17:03.842122 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-7f5f5f8b64-f2wff_f4ad309b-1078-40e9-abd7-d1b476971fce/placement-log/0.log"
Nov 28 11:17:03 crc kubenswrapper[4838]: I1128 11:17:03.929171 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_091769fb-bf67-454a-b0da-3e33589799f9/setup-container/0.log"
Nov 28 11:17:04 crc kubenswrapper[4838]: I1128 11:17:04.096376 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_091769fb-bf67-454a-b0da-3e33589799f9/setup-container/0.log"
Nov 28 11:17:04 crc kubenswrapper[4838]: I1128 11:17:04.168324 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_091769fb-bf67-454a-b0da-3e33589799f9/rabbitmq/0.log"
Nov 28 11:17:04 crc kubenswrapper[4838]: I1128 11:17:04.220670 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_969e66ab-e24e-4a63-9543-8214980ccbe3/setup-container/0.log"
Nov 28 11:17:04 crc kubenswrapper[4838]: I1128 11:17:04.371246 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_969e66ab-e24e-4a63-9543-8214980ccbe3/setup-container/0.log"
Nov 28 11:17:04 crc kubenswrapper[4838]: I1128 11:17:04.452254 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_969e66ab-e24e-4a63-9543-8214980ccbe3/rabbitmq/0.log"
Nov 28 11:17:04 crc kubenswrapper[4838]: I1128 11:17:04.571581 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-p5n7c_801ebab7-c9de-423e-910d-32e56be5cc7b/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 28 11:17:04 crc kubenswrapper[4838]: I1128 11:17:04.678470 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-sjch6_277c54af-c091-426d-bf0d-523eca9b41fb/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 28 11:17:04 crc kubenswrapper[4838]: I1128 11:17:04.853865 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-f8h25_ea469a90-76a4-4712-90a7-af038c331ee2/run-os-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 28 11:17:05 crc kubenswrapper[4838]: I1128 11:17:05.065949 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-zfmqh_fcf22c85-ed90-47d9-9131-365fa3620686/ssh-known-hosts-edpm-deployment/0.log"
Nov 28 11:17:05 crc kubenswrapper[4838]: I1128 11:17:05.121997 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_7899bfa9-2025-457b-9c46-194188b7f52e/tempest-tests-tempest-tests-runner/0.log"
Nov 28 11:17:05 crc kubenswrapper[4838]: I1128 11:17:05.228421 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_9d260724-3275-4670-966b-0c961bf54da5/test-operator-logs-container/0.log"
Nov 28 11:17:05 crc kubenswrapper[4838]: I1128 11:17:05.352002 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-mrkhc_d1fe48d6-01d5-4805-8359-921c9b8888a4/validate-network-edpm-deployment-openstack-edpm-ipam/0.log"
Nov 28 11:17:09 crc kubenswrapper[4838]: I1128 11:17:09.561619 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e"
Nov 28 11:17:09 crc kubenswrapper[4838]: E1128 11:17:09.562443 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 11:17:21 crc kubenswrapper[4838]: I1128 11:17:21.932925 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_09948818-c683-4cab-ab8e-c4bfa54809a1/memcached/0.log"
Nov 28 11:17:22 crc kubenswrapper[4838]: I1128 11:17:22.562189 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e"
Nov 28 11:17:22 crc kubenswrapper[4838]: E1128 11:17:22.563132 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 11:17:34 crc kubenswrapper[4838]: I1128 11:17:34.562465 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e"
Nov 28 11:17:34 crc kubenswrapper[4838]: E1128 11:17:34.563186 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 11:17:35 crc kubenswrapper[4838]: I1128 11:17:35.875319 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s_b2da9f3e-de9a-430d-887e-6b75fa6133d1/util/0.log"
Nov 28 11:17:36 crc kubenswrapper[4838]: I1128 11:17:36.032215 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s_b2da9f3e-de9a-430d-887e-6b75fa6133d1/util/0.log"
Nov 28 11:17:36 crc kubenswrapper[4838]: I1128 11:17:36.063101 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s_b2da9f3e-de9a-430d-887e-6b75fa6133d1/pull/0.log"
Nov 28 11:17:36 crc kubenswrapper[4838]: I1128 11:17:36.080791 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s_b2da9f3e-de9a-430d-887e-6b75fa6133d1/pull/0.log"
Nov 28 11:17:36 crc kubenswrapper[4838]: I1128 11:17:36.241443 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s_b2da9f3e-de9a-430d-887e-6b75fa6133d1/util/0.log"
Nov 28 11:17:36 crc kubenswrapper[4838]: I1128 11:17:36.241833 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s_b2da9f3e-de9a-430d-887e-6b75fa6133d1/pull/0.log"
Nov 28 11:17:36 crc kubenswrapper[4838]: I1128 11:17:36.274388 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_703c3d28e1f0f4fdc243d52db993b95e20d3eb98fc494e62d3c67e00b725j9s_b2da9f3e-de9a-430d-887e-6b75fa6133d1/extract/0.log"
Nov 28 11:17:36 crc kubenswrapper[4838]: I1128 11:17:36.450524 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-zsv5q_2d42b4ea-468b-482a-8d06-57d2cd7d40f0/kube-rbac-proxy/0.log"
Nov 28 11:17:36 crc kubenswrapper[4838]: I1128 11:17:36.477213 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-zsv5q_2d42b4ea-468b-482a-8d06-57d2cd7d40f0/manager/0.log"
Nov 28 11:17:36 crc kubenswrapper[4838]: I1128 11:17:36.511129 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-z8m7s_458f4354-42e8-46d1-a571-0a0d1a852574/kube-rbac-proxy/0.log"
Nov 28 11:17:36 crc kubenswrapper[4838]: I1128 11:17:36.677706 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-z8m7s_458f4354-42e8-46d1-a571-0a0d1a852574/manager/0.log"
Nov 28 11:17:36 crc kubenswrapper[4838]: I1128 11:17:36.697954 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-jtm69_aceca209-5955-4644-a139-2dfc5d36bb48/kube-rbac-proxy/0.log"
Nov 28 11:17:36 crc kubenswrapper[4838]: I1128 11:17:36.704270 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-jtm69_aceca209-5955-4644-a139-2dfc5d36bb48/manager/0.log"
Nov 28 11:17:36 crc kubenswrapper[4838]: I1128 11:17:36.856933 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-sgt4v_20706334-3560-47c0-beee-0eacda6e2eeb/kube-rbac-proxy/0.log"
Nov 28 11:17:36 crc kubenswrapper[4838]: I1128 11:17:36.977226 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-sgt4v_20706334-3560-47c0-beee-0eacda6e2eeb/manager/0.log"
Nov 28 11:17:37 crc kubenswrapper[4838]: I1128 11:17:37.067930 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-mfvxw_bfc8796f-9498-4707-ae79-225de0c3d39f/kube-rbac-proxy/0.log"
Nov 28 11:17:37 crc kubenswrapper[4838]: I1128 11:17:37.093474 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-mfvxw_bfc8796f-9498-4707-ae79-225de0c3d39f/manager/0.log"
Nov 28 11:17:37 crc kubenswrapper[4838]: I1128 11:17:37.185214 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-99pnl_f79a2f03-43b3-47d0-89f8-55374a730a22/kube-rbac-proxy/0.log"
Nov 28 11:17:37 crc kubenswrapper[4838]: I1128 11:17:37.275471 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-99pnl_f79a2f03-43b3-47d0-89f8-55374a730a22/manager/0.log"
Nov 28 11:17:37 crc kubenswrapper[4838]: I1128 11:17:37.326138 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-7qwxs_64a7b90b-6294-429b-b7f8-7820d9a5514e/kube-rbac-proxy/0.log"
Nov 28 11:17:37 crc kubenswrapper[4838]: I1128 11:17:37.512636 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-dldwc_00ae7528-ac6c-4ceb-9e8f-80e588aced3d/kube-rbac-proxy/0.log"
Nov 28 11:17:37 crc kubenswrapper[4838]: I1128 11:17:37.556950 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-7qwxs_64a7b90b-6294-429b-b7f8-7820d9a5514e/manager/0.log"
Nov 28 11:17:37 crc kubenswrapper[4838]: I1128 11:17:37.570500 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-dldwc_00ae7528-ac6c-4ceb-9e8f-80e588aced3d/manager/0.log"
Nov 28 11:17:37 crc kubenswrapper[4838]: I1128 11:17:37.663453 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-qt6hp_f9825746-5143-4716-9458-aad44231b721/kube-rbac-proxy/0.log"
Nov 28 11:17:37 crc kubenswrapper[4838]: I1128 11:17:37.785933 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-qt6hp_f9825746-5143-4716-9458-aad44231b721/manager/0.log"
Nov 28 11:17:37 crc kubenswrapper[4838]: I1128 11:17:37.856259 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-67764766d7-5dcgh_d8596d98-979f-4b13-bef4-ccaabbcf155e/kube-rbac-proxy/0.log"
Nov 28 11:17:37 crc kubenswrapper[4838]: I1128 11:17:37.921183 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-67764766d7-5dcgh_d8596d98-979f-4b13-bef4-ccaabbcf155e/manager/0.log"
Nov 28 11:17:37 crc kubenswrapper[4838]: I1128 11:17:37.962004 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-lppm4_aef566ed-5e8f-4ce9-9fa4-75bfef26a65e/kube-rbac-proxy/0.log"
Nov 28 11:17:38 crc kubenswrapper[4838]: I1128 11:17:38.040850 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-lppm4_aef566ed-5e8f-4ce9-9fa4-75bfef26a65e/manager/0.log"
Nov 28 11:17:38 crc kubenswrapper[4838]: I1128 11:17:38.107875 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-nw7wx_03bb7fb2-31ae-4e18-b77d-e6dad8007460/kube-rbac-proxy/0.log"
Nov 28 11:17:38 crc kubenswrapper[4838]: I1128 11:17:38.219636 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-nw7wx_03bb7fb2-31ae-4e18-b77d-e6dad8007460/manager/0.log"
Nov 28 11:17:38 crc kubenswrapper[4838]: I1128 11:17:38.363091 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-zrl6r_90e6e6d2-fd36-40e9-9002-d3a5e4c53f4e/kube-rbac-proxy/0.log"
Nov 28 11:17:38 crc kubenswrapper[4838]: I1128 11:17:38.448392 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-zrl6r_90e6e6d2-fd36-40e9-9002-d3a5e4c53f4e/manager/0.log"
Nov 28 11:17:38 crc kubenswrapper[4838]: I1128 11:17:38.477449 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-tjbnw_40466f6c-03c1-4aa4-9d03-947168f4068c/kube-rbac-proxy/0.log"
Nov 28 11:17:38 crc kubenswrapper[4838]: I1128 11:17:38.550371 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-tjbnw_40466f6c-03c1-4aa4-9d03-947168f4068c/manager/0.log"
Nov 28 11:17:38 crc kubenswrapper[4838]: I1128 11:17:38.621736 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6_dd6ab766-6c66-4d3e-8089-9fe2faf6a28a/kube-rbac-proxy/0.log"
Nov 28 11:17:38 crc kubenswrapper[4838]: I1128 11:17:38.687651 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6bpf8p6_dd6ab766-6c66-4d3e-8089-9fe2faf6a28a/manager/0.log"
Nov 28 11:17:39 crc kubenswrapper[4838]: I1128 11:17:39.054538 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-8j4q4_c89c4ded-08e4-4d58-bb51-0f0e997b76a6/registry-server/0.log"
Nov 28 11:17:39 crc kubenswrapper[4838]: I1128 11:17:39.059398 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5d66f99678-q8k4k_50baf569-8340-4264-8e08-28049728c9ad/operator/0.log"
Nov 28 11:17:39 crc kubenswrapper[4838]: I1128 11:17:39.227797 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-jk9v2_eb245c1e-92f6-486e-be63-0093a22ed7b0/kube-rbac-proxy/0.log"
Nov 28 11:17:39 crc kubenswrapper[4838]: I1128 11:17:39.395227 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-jk9v2_eb245c1e-92f6-486e-be63-0093a22ed7b0/manager/0.log"
Nov 28 11:17:39 crc kubenswrapper[4838]: I1128 11:17:39.437076 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-t8tj5_f74a2c89-ae8d-428e-b8b2-d2d58e943f8e/kube-rbac-proxy/0.log"
Nov 28 11:17:39 crc kubenswrapper[4838]: I1128 11:17:39.596502 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-t8tj5_f74a2c89-ae8d-428e-b8b2-d2d58e943f8e/manager/0.log"
Nov 28 11:17:39 crc kubenswrapper[4838]: I1128 11:17:39.689367 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-xj7cz_6979c02b-5bc1-4eec-aa05-086f449ffd93/operator/0.log"
Nov 28 11:17:39 crc kubenswrapper[4838]: I1128 11:17:39.835515 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-2mxtr_fb9e8fa1-8798-424a-a435-daae465a8e79/manager/0.log"
Nov 28 11:17:39 crc kubenswrapper[4838]: I1128 11:17:39.849563 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-2mxtr_fb9e8fa1-8798-424a-a435-daae465a8e79/kube-rbac-proxy/0.log"
Nov 28 11:17:39 crc kubenswrapper[4838]: I1128 11:17:39.925738 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-2xswp_5f3be357-f971-4dd1-bb7e-82098aaad7b4/kube-rbac-proxy/0.log"
Nov 28 11:17:40 crc kubenswrapper[4838]: I1128 11:17:40.014523 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-8599fd59b5-m2r97_397a8728-e2ae-4db0-8446-9044007df4e1/manager/0.log"
Nov 28 11:17:40 crc kubenswrapper[4838]: I1128 11:17:40.141536 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-jrtsw_7b19f93b-ae7d-4e10-acce-53f0c65bbce0/kube-rbac-proxy/0.log"
Nov 28 11:17:40 crc kubenswrapper[4838]: I1128 11:17:40.182969 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-jrtsw_7b19f93b-ae7d-4e10-acce-53f0c65bbce0/manager/0.log"
Nov 28 11:17:40 crc kubenswrapper[4838]: I1128 11:17:40.350383 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-pqwfn_431c4d08-781a-4925-96cd-153997f72239/kube-rbac-proxy/0.log"
Nov 28 11:17:40 crc kubenswrapper[4838]: I1128 11:17:40.469561 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-pqwfn_431c4d08-781a-4925-96cd-153997f72239/manager/0.log"
Nov 28 11:17:41 crc kubenswrapper[4838]: I1128 11:17:41.065864 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-2xswp_5f3be357-f971-4dd1-bb7e-82098aaad7b4/manager/0.log"
Nov 28 11:17:47 crc kubenswrapper[4838]: I1128 11:17:47.562551 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e"
Nov 28 11:17:47 crc kubenswrapper[4838]: E1128 11:17:47.563292 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 11:18:00 crc kubenswrapper[4838]: I1128 11:18:00.562603 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e"
Nov 28 11:18:00 crc kubenswrapper[4838]: E1128 11:18:00.563399 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 11:18:01 crc kubenswrapper[4838]: I1128 11:18:01.142474 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-vrzwk_502acc2d-a5e3-4240-b2fb-7f67b7518b82/control-plane-machine-set-operator/0.log"
Nov 28 11:18:01 crc kubenswrapper[4838]: I1128 11:18:01.324689 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-bs85m_fb0f7dc9-74c6-4031-8edb-7b10c219df34/kube-rbac-proxy/0.log"
Nov 28 11:18:01 crc kubenswrapper[4838]: I1128 11:18:01.380565 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-bs85m_fb0f7dc9-74c6-4031-8edb-7b10c219df34/machine-api-operator/0.log"
Nov 28 11:18:11 crc kubenswrapper[4838]: I1128 11:18:11.561877 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e"
Nov 28 11:18:11 crc kubenswrapper[4838]: E1128 11:18:11.562913 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 11:18:14 crc kubenswrapper[4838]: I1128 11:18:14.923229 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-gcqpn_e850d813-cc68-49bd-aa4d-ab3271b36d41/cert-manager-controller/0.log"
Nov 28 11:18:15 crc kubenswrapper[4838]: I1128 11:18:15.029706 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-pc7lt_34c85aea-53ac-4f8b-b4b7-a5262768ea9a/cert-manager-cainjector/0.log"
Nov 28 11:18:15 crc kubenswrapper[4838]: I1128 11:18:15.091458 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-zldj2_863773f7-97a9-4bcc-8c5d-86b5533f1c6b/cert-manager-webhook/0.log"
Nov 28 11:18:26 crc kubenswrapper[4838]: I1128 11:18:26.563279 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e"
Nov 28 11:18:26 crc kubenswrapper[4838]: E1128 11:18:26.564174 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 11:18:28 crc kubenswrapper[4838]: I1128 11:18:28.503298 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7fbb5f6569-qvrkm_2205c062-150d-43c9-8e91-9187c92a1908/nmstate-console-plugin/0.log"
Nov 28 11:18:28 crc kubenswrapper[4838]: I1128 11:18:28.712760 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-sgkvp_7d076e50-6cc5-4258-b334-faa9d4f1a3b4/nmstate-handler/0.log"
Nov 28 11:18:28 crc kubenswrapper[4838]: I1128 11:18:28.722703 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-hrdrc_1190df35-2195-49c1-abb5-1a5e11626ec4/kube-rbac-proxy/0.log"
Nov 28 11:18:28 crc kubenswrapper[4838]: I1128 11:18:28.768476 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-hrdrc_1190df35-2195-49c1-abb5-1a5e11626ec4/nmstate-metrics/0.log"
Nov 28 11:18:28 crc kubenswrapper[4838]: I1128 11:18:28.936910 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5b5b58f5c8-9n28j_8726f2de-449f-4a3d-ae20-cf2e1f14abe2/nmstate-operator/0.log"
Nov 28 11:18:29 crc kubenswrapper[4838]: I1128 11:18:29.020551 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-5f6d4c5ccb-pf4kk_27f91860-f5ee-4232-b298-bf97137a1d12/nmstate-webhook/0.log"
Nov 28 11:18:41 crc kubenswrapper[4838]: I1128 11:18:41.563350 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e"
Nov 28 11:18:41 crc kubenswrapper[4838]: E1128 11:18:41.564648 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd"
Nov 28 11:18:45 crc kubenswrapper[4838]: I1128 11:18:45.595611 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-p4xws_ef168536-335c-417d-b9d5-7dc2affb0b62/kube-rbac-proxy/0.log"
Nov 28 11:18:45 crc kubenswrapper[4838]: I1128 11:18:45.724159 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-p4xws_ef168536-335c-417d-b9d5-7dc2affb0b62/controller/0.log"
Nov 28 11:18:45 crc kubenswrapper[4838]: I1128 11:18:45.848435 4838 log.go:25] "Finished parsing log file"
path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-frr-files/0.log" Nov 28 11:18:45 crc kubenswrapper[4838]: I1128 11:18:45.962820 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-frr-files/0.log" Nov 28 11:18:46 crc kubenswrapper[4838]: I1128 11:18:46.001830 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-reloader/0.log" Nov 28 11:18:46 crc kubenswrapper[4838]: I1128 11:18:46.015428 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-metrics/0.log" Nov 28 11:18:46 crc kubenswrapper[4838]: I1128 11:18:46.050651 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-reloader/0.log" Nov 28 11:18:46 crc kubenswrapper[4838]: I1128 11:18:46.228289 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-frr-files/0.log" Nov 28 11:18:46 crc kubenswrapper[4838]: I1128 11:18:46.236402 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-reloader/0.log" Nov 28 11:18:46 crc kubenswrapper[4838]: I1128 11:18:46.275482 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-metrics/0.log" Nov 28 11:18:46 crc kubenswrapper[4838]: I1128 11:18:46.299545 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-metrics/0.log" Nov 28 11:18:46 crc kubenswrapper[4838]: I1128 11:18:46.418992 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-frr-files/0.log" Nov 28 11:18:46 crc kubenswrapper[4838]: I1128 11:18:46.428522 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-metrics/0.log" Nov 28 11:18:46 crc kubenswrapper[4838]: I1128 11:18:46.461662 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/cp-reloader/0.log" Nov 28 11:18:46 crc kubenswrapper[4838]: I1128 11:18:46.485640 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/controller/0.log" Nov 28 11:18:46 crc kubenswrapper[4838]: I1128 11:18:46.648484 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/frr-metrics/0.log" Nov 28 11:18:46 crc kubenswrapper[4838]: I1128 11:18:46.677008 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/kube-rbac-proxy-frr/0.log" Nov 28 11:18:46 crc kubenswrapper[4838]: I1128 11:18:46.679591 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/kube-rbac-proxy/0.log" Nov 28 11:18:46 crc kubenswrapper[4838]: I1128 11:18:46.836820 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/reloader/0.log" Nov 28 11:18:46 crc kubenswrapper[4838]: I1128 
11:18:46.860047 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7fcb986d4-8frbg_8ef825e1-e9dc-4a29-94cd-722613098926/frr-k8s-webhook-server/0.log" Nov 28 11:18:47 crc kubenswrapper[4838]: I1128 11:18:47.050431 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-84ddbcdd65-dqrq8_3eb34f58-6fc9-4220-87a6-74090d0f1874/manager/0.log" Nov 28 11:18:47 crc kubenswrapper[4838]: I1128 11:18:47.219072 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-5bf7d9b684-tdkmz_4588ca95-c23d-4709-8160-ab77a17f858d/webhook-server/0.log" Nov 28 11:18:47 crc kubenswrapper[4838]: I1128 11:18:47.341408 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-xrbxd_7f764b03-48a1-45af-b406-40c60c1e912c/kube-rbac-proxy/0.log" Nov 28 11:18:47 crc kubenswrapper[4838]: I1128 11:18:47.819367 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-xrbxd_7f764b03-48a1-45af-b406-40c60c1e912c/speaker/0.log" Nov 28 11:18:48 crc kubenswrapper[4838]: I1128 11:18:48.213623 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-hdb2b_9e6ea3f2-6774-4bbb-b0fc-fcb8b6106b53/frr/0.log" Nov 28 11:18:52 crc kubenswrapper[4838]: I1128 11:18:52.950427 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-x8rmj"] Nov 28 11:18:52 crc kubenswrapper[4838]: E1128 11:18:52.951513 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4783327e-4a17-41ee-a140-ea36c00654b8" containerName="extract-content" Nov 28 11:18:52 crc kubenswrapper[4838]: I1128 11:18:52.951530 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="4783327e-4a17-41ee-a140-ea36c00654b8" containerName="extract-content" Nov 28 11:18:52 crc kubenswrapper[4838]: E1128 11:18:52.951552 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4783327e-4a17-41ee-a140-ea36c00654b8" containerName="extract-utilities" Nov 28 11:18:52 crc kubenswrapper[4838]: I1128 11:18:52.951560 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="4783327e-4a17-41ee-a140-ea36c00654b8" containerName="extract-utilities" Nov 28 11:18:52 crc kubenswrapper[4838]: E1128 11:18:52.951586 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4783327e-4a17-41ee-a140-ea36c00654b8" containerName="registry-server" Nov 28 11:18:52 crc kubenswrapper[4838]: I1128 11:18:52.951593 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="4783327e-4a17-41ee-a140-ea36c00654b8" containerName="registry-server" Nov 28 11:18:52 crc kubenswrapper[4838]: E1128 11:18:52.951607 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ef83e4f-0be2-454f-b81e-46d679fcd529" containerName="container-00" Nov 28 11:18:52 crc kubenswrapper[4838]: I1128 11:18:52.951614 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ef83e4f-0be2-454f-b81e-46d679fcd529" containerName="container-00" Nov 28 11:18:52 crc kubenswrapper[4838]: I1128 11:18:52.951831 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ef83e4f-0be2-454f-b81e-46d679fcd529" containerName="container-00" Nov 28 11:18:52 crc kubenswrapper[4838]: I1128 11:18:52.951861 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="4783327e-4a17-41ee-a140-ea36c00654b8" containerName="registry-server" Nov 28 11:18:52 crc kubenswrapper[4838]: I1128 11:18:52.957707 4838 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x8rmj" Nov 28 11:18:52 crc kubenswrapper[4838]: I1128 11:18:52.970439 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x8rmj"] Nov 28 11:18:53 crc kubenswrapper[4838]: I1128 11:18:53.034251 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6907aaf9-441d-4253-a302-1939b127083b-utilities\") pod \"community-operators-x8rmj\" (UID: \"6907aaf9-441d-4253-a302-1939b127083b\") " pod="openshift-marketplace/community-operators-x8rmj" Nov 28 11:18:53 crc kubenswrapper[4838]: I1128 11:18:53.034411 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djf6r\" (UniqueName: \"kubernetes.io/projected/6907aaf9-441d-4253-a302-1939b127083b-kube-api-access-djf6r\") pod \"community-operators-x8rmj\" (UID: \"6907aaf9-441d-4253-a302-1939b127083b\") " pod="openshift-marketplace/community-operators-x8rmj" Nov 28 11:18:53 crc kubenswrapper[4838]: I1128 11:18:53.034469 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6907aaf9-441d-4253-a302-1939b127083b-catalog-content\") pod \"community-operators-x8rmj\" (UID: \"6907aaf9-441d-4253-a302-1939b127083b\") " pod="openshift-marketplace/community-operators-x8rmj" Nov 28 11:18:53 crc kubenswrapper[4838]: I1128 11:18:53.135786 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6907aaf9-441d-4253-a302-1939b127083b-utilities\") pod \"community-operators-x8rmj\" (UID: \"6907aaf9-441d-4253-a302-1939b127083b\") " pod="openshift-marketplace/community-operators-x8rmj" Nov 28 11:18:53 crc kubenswrapper[4838]: I1128 11:18:53.135873 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djf6r\" (UniqueName: \"kubernetes.io/projected/6907aaf9-441d-4253-a302-1939b127083b-kube-api-access-djf6r\") pod \"community-operators-x8rmj\" (UID: \"6907aaf9-441d-4253-a302-1939b127083b\") " pod="openshift-marketplace/community-operators-x8rmj" Nov 28 11:18:53 crc kubenswrapper[4838]: I1128 11:18:53.135919 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6907aaf9-441d-4253-a302-1939b127083b-catalog-content\") pod \"community-operators-x8rmj\" (UID: \"6907aaf9-441d-4253-a302-1939b127083b\") " pod="openshift-marketplace/community-operators-x8rmj" Nov 28 11:18:53 crc kubenswrapper[4838]: I1128 11:18:53.136383 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6907aaf9-441d-4253-a302-1939b127083b-utilities\") pod \"community-operators-x8rmj\" (UID: \"6907aaf9-441d-4253-a302-1939b127083b\") " pod="openshift-marketplace/community-operators-x8rmj" Nov 28 11:18:53 crc kubenswrapper[4838]: I1128 11:18:53.136449 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6907aaf9-441d-4253-a302-1939b127083b-catalog-content\") pod \"community-operators-x8rmj\" (UID: \"6907aaf9-441d-4253-a302-1939b127083b\") " pod="openshift-marketplace/community-operators-x8rmj" Nov 28 11:18:53 crc kubenswrapper[4838]: I1128 
11:18:53.155386 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djf6r\" (UniqueName: \"kubernetes.io/projected/6907aaf9-441d-4253-a302-1939b127083b-kube-api-access-djf6r\") pod \"community-operators-x8rmj\" (UID: \"6907aaf9-441d-4253-a302-1939b127083b\") " pod="openshift-marketplace/community-operators-x8rmj" Nov 28 11:18:53 crc kubenswrapper[4838]: I1128 11:18:53.279052 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x8rmj" Nov 28 11:18:53 crc kubenswrapper[4838]: I1128 11:18:53.564748 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e" Nov 28 11:18:53 crc kubenswrapper[4838]: E1128 11:18:53.565756 4838 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-5dxdd_openshift-machine-config-operator(5c3daa53-8c4e-4e30-aeba-146602dd45cd)\"" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" Nov 28 11:18:53 crc kubenswrapper[4838]: I1128 11:18:53.728170 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x8rmj"] Nov 28 11:18:54 crc kubenswrapper[4838]: I1128 11:18:54.220512 4838 generic.go:334] "Generic (PLEG): container finished" podID="6907aaf9-441d-4253-a302-1939b127083b" containerID="1a6cbabfec6724917aa9da6f3f0501410f582db0510f3f862ca79f89c327f227" exitCode=0 Nov 28 11:18:54 crc kubenswrapper[4838]: I1128 11:18:54.220628 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8rmj" event={"ID":"6907aaf9-441d-4253-a302-1939b127083b","Type":"ContainerDied","Data":"1a6cbabfec6724917aa9da6f3f0501410f582db0510f3f862ca79f89c327f227"} Nov 28 11:18:54 crc kubenswrapper[4838]: I1128 11:18:54.220899 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8rmj" event={"ID":"6907aaf9-441d-4253-a302-1939b127083b","Type":"ContainerStarted","Data":"7399bd7b6f3f41dbec5a5f0726cd3f92f3fb4d15432e4cc3d52e50f11c13b503"} Nov 28 11:18:56 crc kubenswrapper[4838]: I1128 11:18:56.244012 4838 generic.go:334] "Generic (PLEG): container finished" podID="6907aaf9-441d-4253-a302-1939b127083b" containerID="c07386197ee52ab2faeab872e6964ae56cb7679a452641243eee4fe9418fe18c" exitCode=0 Nov 28 11:18:56 crc kubenswrapper[4838]: I1128 11:18:56.244078 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8rmj" event={"ID":"6907aaf9-441d-4253-a302-1939b127083b","Type":"ContainerDied","Data":"c07386197ee52ab2faeab872e6964ae56cb7679a452641243eee4fe9418fe18c"} Nov 28 11:18:57 crc kubenswrapper[4838]: I1128 11:18:57.258170 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8rmj" event={"ID":"6907aaf9-441d-4253-a302-1939b127083b","Type":"ContainerStarted","Data":"cf0b1255868f978928b5e631f85281a409f16e9c811ea9f2b6ae3f6282dd2b66"} Nov 28 11:18:57 crc kubenswrapper[4838]: I1128 11:18:57.280394 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-x8rmj" podStartSLOduration=2.776897539 podStartE2EDuration="5.280373235s" podCreationTimestamp="2025-11-28 11:18:52 +0000 UTC" firstStartedPulling="2025-11-28 11:18:54.222585802 
+0000 UTC m=+4905.921559972" lastFinishedPulling="2025-11-28 11:18:56.726061488 +0000 UTC m=+4908.425035668" observedRunningTime="2025-11-28 11:18:57.272893121 +0000 UTC m=+4908.971867311" watchObservedRunningTime="2025-11-28 11:18:57.280373235 +0000 UTC m=+4908.979347405" Nov 28 11:19:01 crc kubenswrapper[4838]: I1128 11:19:01.047245 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql_607ae4a0-90c8-48ff-afa9-21eb5b545fce/util/0.log" Nov 28 11:19:01 crc kubenswrapper[4838]: I1128 11:19:01.217827 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql_607ae4a0-90c8-48ff-afa9-21eb5b545fce/util/0.log" Nov 28 11:19:01 crc kubenswrapper[4838]: I1128 11:19:01.228296 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql_607ae4a0-90c8-48ff-afa9-21eb5b545fce/pull/0.log" Nov 28 11:19:01 crc kubenswrapper[4838]: I1128 11:19:01.228398 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql_607ae4a0-90c8-48ff-afa9-21eb5b545fce/pull/0.log" Nov 28 11:19:01 crc kubenswrapper[4838]: I1128 11:19:01.426160 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql_607ae4a0-90c8-48ff-afa9-21eb5b545fce/extract/0.log" Nov 28 11:19:01 crc kubenswrapper[4838]: I1128 11:19:01.454115 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql_607ae4a0-90c8-48ff-afa9-21eb5b545fce/pull/0.log" Nov 28 11:19:01 crc kubenswrapper[4838]: I1128 11:19:01.464417 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f6zxql_607ae4a0-90c8-48ff-afa9-21eb5b545fce/util/0.log" Nov 28 11:19:01 crc kubenswrapper[4838]: I1128 11:19:01.596530 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7_29970a13-5b66-4cf6-8515-3b4bd570dd2f/util/0.log" Nov 28 11:19:02 crc kubenswrapper[4838]: I1128 11:19:02.641622 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7_29970a13-5b66-4cf6-8515-3b4bd570dd2f/pull/0.log" Nov 28 11:19:02 crc kubenswrapper[4838]: I1128 11:19:02.706582 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7_29970a13-5b66-4cf6-8515-3b4bd570dd2f/pull/0.log" Nov 28 11:19:02 crc kubenswrapper[4838]: I1128 11:19:02.862897 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7_29970a13-5b66-4cf6-8515-3b4bd570dd2f/extract/0.log" Nov 28 11:19:02 crc kubenswrapper[4838]: I1128 11:19:02.942296 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7_29970a13-5b66-4cf6-8515-3b4bd570dd2f/util/0.log" Nov 28 11:19:02 crc kubenswrapper[4838]: I1128 11:19:02.949706 4838 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7_29970a13-5b66-4cf6-8515-3b4bd570dd2f/util/0.log" Nov 28 11:19:02 crc kubenswrapper[4838]: I1128 11:19:02.960954 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f8375dl7_29970a13-5b66-4cf6-8515-3b4bd570dd2f/pull/0.log" Nov 28 11:19:03 crc kubenswrapper[4838]: I1128 11:19:03.135919 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j64zj_c38d1237-07ae-448a-9a53-5432a944fd83/extract-utilities/0.log" Nov 28 11:19:03 crc kubenswrapper[4838]: I1128 11:19:03.279910 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-x8rmj" Nov 28 11:19:03 crc kubenswrapper[4838]: I1128 11:19:03.279976 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-x8rmj" Nov 28 11:19:03 crc kubenswrapper[4838]: I1128 11:19:03.295244 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j64zj_c38d1237-07ae-448a-9a53-5432a944fd83/extract-utilities/0.log" Nov 28 11:19:03 crc kubenswrapper[4838]: I1128 11:19:03.319889 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j64zj_c38d1237-07ae-448a-9a53-5432a944fd83/extract-content/0.log" Nov 28 11:19:03 crc kubenswrapper[4838]: I1128 11:19:03.326445 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j64zj_c38d1237-07ae-448a-9a53-5432a944fd83/extract-content/0.log" Nov 28 11:19:03 crc kubenswrapper[4838]: I1128 11:19:03.338287 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-x8rmj" Nov 28 11:19:03 crc kubenswrapper[4838]: I1128 11:19:03.393091 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-x8rmj" Nov 28 11:19:03 crc kubenswrapper[4838]: I1128 11:19:03.574180 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j64zj_c38d1237-07ae-448a-9a53-5432a944fd83/extract-utilities/0.log" Nov 28 11:19:03 crc kubenswrapper[4838]: I1128 11:19:03.588634 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x8rmj"] Nov 28 11:19:03 crc kubenswrapper[4838]: I1128 11:19:03.665513 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j64zj_c38d1237-07ae-448a-9a53-5432a944fd83/extract-content/0.log" Nov 28 11:19:03 crc kubenswrapper[4838]: I1128 11:19:03.797597 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-n4cg9_9afd2484-54f9-4dd9-b081-6537c075864f/extract-utilities/0.log" Nov 28 11:19:04 crc kubenswrapper[4838]: I1128 11:19:04.034118 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-n4cg9_9afd2484-54f9-4dd9-b081-6537c075864f/extract-utilities/0.log" Nov 28 11:19:04 crc kubenswrapper[4838]: I1128 11:19:04.051524 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-n4cg9_9afd2484-54f9-4dd9-b081-6537c075864f/extract-content/0.log" Nov 28 11:19:04 crc kubenswrapper[4838]: I1128 11:19:04.195988 4838 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-n4cg9_9afd2484-54f9-4dd9-b081-6537c075864f/extract-content/0.log" Nov 28 11:19:04 crc kubenswrapper[4838]: I1128 11:19:04.205518 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j64zj_c38d1237-07ae-448a-9a53-5432a944fd83/registry-server/0.log" Nov 28 11:19:04 crc kubenswrapper[4838]: I1128 11:19:04.400740 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-n4cg9_9afd2484-54f9-4dd9-b081-6537c075864f/extract-utilities/0.log" Nov 28 11:19:04 crc kubenswrapper[4838]: I1128 11:19:04.425093 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-n4cg9_9afd2484-54f9-4dd9-b081-6537c075864f/extract-content/0.log" Nov 28 11:19:04 crc kubenswrapper[4838]: I1128 11:19:04.611610 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-x8rmj_6907aaf9-441d-4253-a302-1939b127083b/extract-utilities/0.log" Nov 28 11:19:04 crc kubenswrapper[4838]: I1128 11:19:04.923915 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-x8rmj_6907aaf9-441d-4253-a302-1939b127083b/extract-content/0.log" Nov 28 11:19:04 crc kubenswrapper[4838]: I1128 11:19:04.924191 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-x8rmj_6907aaf9-441d-4253-a302-1939b127083b/extract-content/0.log" Nov 28 11:19:04 crc kubenswrapper[4838]: I1128 11:19:04.929431 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-x8rmj_6907aaf9-441d-4253-a302-1939b127083b/extract-utilities/0.log" Nov 28 11:19:05 crc kubenswrapper[4838]: I1128 11:19:05.035675 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-x8rmj_6907aaf9-441d-4253-a302-1939b127083b/extract-utilities/0.log" Nov 28 11:19:05 crc kubenswrapper[4838]: I1128 11:19:05.149276 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-x8rmj_6907aaf9-441d-4253-a302-1939b127083b/extract-content/0.log" Nov 28 11:19:05 crc kubenswrapper[4838]: I1128 11:19:05.192783 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-x8rmj_6907aaf9-441d-4253-a302-1939b127083b/registry-server/0.log" Nov 28 11:19:05 crc kubenswrapper[4838]: I1128 11:19:05.203925 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-n4cg9_9afd2484-54f9-4dd9-b081-6537c075864f/registry-server/0.log" Nov 28 11:19:05 crc kubenswrapper[4838]: I1128 11:19:05.213025 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-6mmw4_b0c9680e-7b0a-47a9-87dc-4da8cfbfce77/marketplace-operator/0.log" Nov 28 11:19:05 crc kubenswrapper[4838]: I1128 11:19:05.338022 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-x8rmj" podUID="6907aaf9-441d-4253-a302-1939b127083b" containerName="registry-server" containerID="cri-o://cf0b1255868f978928b5e631f85281a409f16e9c811ea9f2b6ae3f6282dd2b66" gracePeriod=2 Nov 28 11:19:05 crc kubenswrapper[4838]: I1128 11:19:05.346007 4838 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-trs27_1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8/extract-utilities/0.log" Nov 28 11:19:05 crc kubenswrapper[4838]: I1128 11:19:05.498269 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-trs27_1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8/extract-utilities/0.log" Nov 28 11:19:05 crc kubenswrapper[4838]: I1128 11:19:05.525745 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-trs27_1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8/extract-content/0.log" Nov 28 11:19:05 crc kubenswrapper[4838]: I1128 11:19:05.542978 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-trs27_1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8/extract-content/0.log" Nov 28 11:19:05 crc kubenswrapper[4838]: I1128 11:19:05.715169 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-trs27_1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8/extract-content/0.log" Nov 28 11:19:05 crc kubenswrapper[4838]: I1128 11:19:05.735034 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-cmcj8_01087280-c77b-4764-91ba-468b21f32427/extract-utilities/0.log" Nov 28 11:19:05 crc kubenswrapper[4838]: I1128 11:19:05.843780 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x8rmj" Nov 28 11:19:05 crc kubenswrapper[4838]: I1128 11:19:05.855871 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-trs27_1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8/extract-utilities/0.log" Nov 28 11:19:05 crc kubenswrapper[4838]: I1128 11:19:05.907343 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-trs27_1b0a5899-5f8c-42e9-b686-cbc3ae3b33c8/registry-server/0.log" Nov 28 11:19:05 crc kubenswrapper[4838]: I1128 11:19:05.917303 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6907aaf9-441d-4253-a302-1939b127083b-utilities\") pod \"6907aaf9-441d-4253-a302-1939b127083b\" (UID: \"6907aaf9-441d-4253-a302-1939b127083b\") " Nov 28 11:19:05 crc kubenswrapper[4838]: I1128 11:19:05.917356 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-djf6r\" (UniqueName: \"kubernetes.io/projected/6907aaf9-441d-4253-a302-1939b127083b-kube-api-access-djf6r\") pod \"6907aaf9-441d-4253-a302-1939b127083b\" (UID: \"6907aaf9-441d-4253-a302-1939b127083b\") " Nov 28 11:19:05 crc kubenswrapper[4838]: I1128 11:19:05.917418 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6907aaf9-441d-4253-a302-1939b127083b-catalog-content\") pod \"6907aaf9-441d-4253-a302-1939b127083b\" (UID: \"6907aaf9-441d-4253-a302-1939b127083b\") " Nov 28 11:19:05 crc kubenswrapper[4838]: I1128 11:19:05.918700 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6907aaf9-441d-4253-a302-1939b127083b-utilities" (OuterVolumeSpecName: "utilities") pod "6907aaf9-441d-4253-a302-1939b127083b" (UID: "6907aaf9-441d-4253-a302-1939b127083b"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:19:05 crc kubenswrapper[4838]: I1128 11:19:05.920145 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6907aaf9-441d-4253-a302-1939b127083b-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:19:05 crc kubenswrapper[4838]: I1128 11:19:05.948150 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6907aaf9-441d-4253-a302-1939b127083b-kube-api-access-djf6r" (OuterVolumeSpecName: "kube-api-access-djf6r") pod "6907aaf9-441d-4253-a302-1939b127083b" (UID: "6907aaf9-441d-4253-a302-1939b127083b"). InnerVolumeSpecName "kube-api-access-djf6r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:19:05 crc kubenswrapper[4838]: I1128 11:19:05.965379 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6907aaf9-441d-4253-a302-1939b127083b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6907aaf9-441d-4253-a302-1939b127083b" (UID: "6907aaf9-441d-4253-a302-1939b127083b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.022059 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djf6r\" (UniqueName: \"kubernetes.io/projected/6907aaf9-441d-4253-a302-1939b127083b-kube-api-access-djf6r\") on node \"crc\" DevicePath \"\"" Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.022099 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6907aaf9-441d-4253-a302-1939b127083b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.036891 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-cmcj8_01087280-c77b-4764-91ba-468b21f32427/extract-utilities/0.log" Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.061005 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-cmcj8_01087280-c77b-4764-91ba-468b21f32427/extract-content/0.log" Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.063490 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-cmcj8_01087280-c77b-4764-91ba-468b21f32427/extract-content/0.log" Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.224517 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-cmcj8_01087280-c77b-4764-91ba-468b21f32427/extract-content/0.log" Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.232645 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-cmcj8_01087280-c77b-4764-91ba-468b21f32427/extract-utilities/0.log" Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.364398 4838 generic.go:334] "Generic (PLEG): container finished" podID="6907aaf9-441d-4253-a302-1939b127083b" containerID="cf0b1255868f978928b5e631f85281a409f16e9c811ea9f2b6ae3f6282dd2b66" exitCode=0 Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.364455 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8rmj" event={"ID":"6907aaf9-441d-4253-a302-1939b127083b","Type":"ContainerDied","Data":"cf0b1255868f978928b5e631f85281a409f16e9c811ea9f2b6ae3f6282dd2b66"} Nov 28 11:19:06 crc 
kubenswrapper[4838]: I1128 11:19:06.364488 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8rmj" event={"ID":"6907aaf9-441d-4253-a302-1939b127083b","Type":"ContainerDied","Data":"7399bd7b6f3f41dbec5a5f0726cd3f92f3fb4d15432e4cc3d52e50f11c13b503"} Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.364542 4838 scope.go:117] "RemoveContainer" containerID="cf0b1255868f978928b5e631f85281a409f16e9c811ea9f2b6ae3f6282dd2b66" Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.364682 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x8rmj" Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.390083 4838 scope.go:117] "RemoveContainer" containerID="c07386197ee52ab2faeab872e6964ae56cb7679a452641243eee4fe9418fe18c" Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.421372 4838 scope.go:117] "RemoveContainer" containerID="1a6cbabfec6724917aa9da6f3f0501410f582db0510f3f862ca79f89c327f227" Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.427194 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x8rmj"] Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.437125 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-x8rmj"] Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.479470 4838 scope.go:117] "RemoveContainer" containerID="cf0b1255868f978928b5e631f85281a409f16e9c811ea9f2b6ae3f6282dd2b66" Nov 28 11:19:06 crc kubenswrapper[4838]: E1128 11:19:06.484065 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf0b1255868f978928b5e631f85281a409f16e9c811ea9f2b6ae3f6282dd2b66\": container with ID starting with cf0b1255868f978928b5e631f85281a409f16e9c811ea9f2b6ae3f6282dd2b66 not found: ID does not exist" containerID="cf0b1255868f978928b5e631f85281a409f16e9c811ea9f2b6ae3f6282dd2b66" Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.484100 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf0b1255868f978928b5e631f85281a409f16e9c811ea9f2b6ae3f6282dd2b66"} err="failed to get container status \"cf0b1255868f978928b5e631f85281a409f16e9c811ea9f2b6ae3f6282dd2b66\": rpc error: code = NotFound desc = could not find container \"cf0b1255868f978928b5e631f85281a409f16e9c811ea9f2b6ae3f6282dd2b66\": container with ID starting with cf0b1255868f978928b5e631f85281a409f16e9c811ea9f2b6ae3f6282dd2b66 not found: ID does not exist" Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.484125 4838 scope.go:117] "RemoveContainer" containerID="c07386197ee52ab2faeab872e6964ae56cb7679a452641243eee4fe9418fe18c" Nov 28 11:19:06 crc kubenswrapper[4838]: E1128 11:19:06.484557 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c07386197ee52ab2faeab872e6964ae56cb7679a452641243eee4fe9418fe18c\": container with ID starting with c07386197ee52ab2faeab872e6964ae56cb7679a452641243eee4fe9418fe18c not found: ID does not exist" containerID="c07386197ee52ab2faeab872e6964ae56cb7679a452641243eee4fe9418fe18c" Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.484580 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c07386197ee52ab2faeab872e6964ae56cb7679a452641243eee4fe9418fe18c"} err="failed to get container status 
\"c07386197ee52ab2faeab872e6964ae56cb7679a452641243eee4fe9418fe18c\": rpc error: code = NotFound desc = could not find container \"c07386197ee52ab2faeab872e6964ae56cb7679a452641243eee4fe9418fe18c\": container with ID starting with c07386197ee52ab2faeab872e6964ae56cb7679a452641243eee4fe9418fe18c not found: ID does not exist" Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.484593 4838 scope.go:117] "RemoveContainer" containerID="1a6cbabfec6724917aa9da6f3f0501410f582db0510f3f862ca79f89c327f227" Nov 28 11:19:06 crc kubenswrapper[4838]: E1128 11:19:06.484893 4838 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a6cbabfec6724917aa9da6f3f0501410f582db0510f3f862ca79f89c327f227\": container with ID starting with 1a6cbabfec6724917aa9da6f3f0501410f582db0510f3f862ca79f89c327f227 not found: ID does not exist" containerID="1a6cbabfec6724917aa9da6f3f0501410f582db0510f3f862ca79f89c327f227" Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.484922 4838 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a6cbabfec6724917aa9da6f3f0501410f582db0510f3f862ca79f89c327f227"} err="failed to get container status \"1a6cbabfec6724917aa9da6f3f0501410f582db0510f3f862ca79f89c327f227\": rpc error: code = NotFound desc = could not find container \"1a6cbabfec6724917aa9da6f3f0501410f582db0510f3f862ca79f89c327f227\": container with ID starting with 1a6cbabfec6724917aa9da6f3f0501410f582db0510f3f862ca79f89c327f227 not found: ID does not exist" Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.561937 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e" Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.575544 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6907aaf9-441d-4253-a302-1939b127083b" path="/var/lib/kubelet/pods/6907aaf9-441d-4253-a302-1939b127083b/volumes" Nov 28 11:19:06 crc kubenswrapper[4838]: I1128 11:19:06.835433 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-cmcj8_01087280-c77b-4764-91ba-468b21f32427/registry-server/0.log" Nov 28 11:19:07 crc kubenswrapper[4838]: I1128 11:19:07.385280 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerStarted","Data":"de4ed4da0dc32a1d3a9d92a015a3f975cc9489bdd3b1bb3bb95fc88a257a1ecf"} Nov 28 11:20:58 crc kubenswrapper[4838]: I1128 11:20:58.661844 4838 generic.go:334] "Generic (PLEG): container finished" podID="573376ae-377c-42ef-96d5-4ff9704f3f4a" containerID="e4197b334988c6f15111214ffe1b9f981e1f23d2115db2b874a5a6d9059ca857" exitCode=0 Nov 28 11:20:58 crc kubenswrapper[4838]: I1128 11:20:58.661946 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-r69n8/must-gather-mk428" event={"ID":"573376ae-377c-42ef-96d5-4ff9704f3f4a","Type":"ContainerDied","Data":"e4197b334988c6f15111214ffe1b9f981e1f23d2115db2b874a5a6d9059ca857"} Nov 28 11:20:58 crc kubenswrapper[4838]: I1128 11:20:58.664037 4838 scope.go:117] "RemoveContainer" containerID="e4197b334988c6f15111214ffe1b9f981e1f23d2115db2b874a5a6d9059ca857" Nov 28 11:20:58 crc kubenswrapper[4838]: I1128 11:20:58.836054 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-r69n8_must-gather-mk428_573376ae-377c-42ef-96d5-4ff9704f3f4a/gather/0.log" Nov 28 
11:21:10 crc kubenswrapper[4838]: I1128 11:21:10.639498 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-r69n8/must-gather-mk428"] Nov 28 11:21:10 crc kubenswrapper[4838]: I1128 11:21:10.640330 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-r69n8/must-gather-mk428" podUID="573376ae-377c-42ef-96d5-4ff9704f3f4a" containerName="copy" containerID="cri-o://d0a2af1606bfb75df271044cea2d92f491499773e690921744b0ca73987b6bf6" gracePeriod=2 Nov 28 11:21:10 crc kubenswrapper[4838]: I1128 11:21:10.646583 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-r69n8/must-gather-mk428"] Nov 28 11:21:10 crc kubenswrapper[4838]: I1128 11:21:10.805762 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-r69n8_must-gather-mk428_573376ae-377c-42ef-96d5-4ff9704f3f4a/copy/0.log" Nov 28 11:21:10 crc kubenswrapper[4838]: I1128 11:21:10.806413 4838 generic.go:334] "Generic (PLEG): container finished" podID="573376ae-377c-42ef-96d5-4ff9704f3f4a" containerID="d0a2af1606bfb75df271044cea2d92f491499773e690921744b0ca73987b6bf6" exitCode=143 Nov 28 11:21:11 crc kubenswrapper[4838]: I1128 11:21:11.084290 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-r69n8_must-gather-mk428_573376ae-377c-42ef-96d5-4ff9704f3f4a/copy/0.log" Nov 28 11:21:11 crc kubenswrapper[4838]: I1128 11:21:11.084577 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-r69n8/must-gather-mk428" Nov 28 11:21:11 crc kubenswrapper[4838]: I1128 11:21:11.225491 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/573376ae-377c-42ef-96d5-4ff9704f3f4a-must-gather-output\") pod \"573376ae-377c-42ef-96d5-4ff9704f3f4a\" (UID: \"573376ae-377c-42ef-96d5-4ff9704f3f4a\") " Nov 28 11:21:11 crc kubenswrapper[4838]: I1128 11:21:11.225598 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vnv48\" (UniqueName: \"kubernetes.io/projected/573376ae-377c-42ef-96d5-4ff9704f3f4a-kube-api-access-vnv48\") pod \"573376ae-377c-42ef-96d5-4ff9704f3f4a\" (UID: \"573376ae-377c-42ef-96d5-4ff9704f3f4a\") " Nov 28 11:21:11 crc kubenswrapper[4838]: I1128 11:21:11.231956 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/573376ae-377c-42ef-96d5-4ff9704f3f4a-kube-api-access-vnv48" (OuterVolumeSpecName: "kube-api-access-vnv48") pod "573376ae-377c-42ef-96d5-4ff9704f3f4a" (UID: "573376ae-377c-42ef-96d5-4ff9704f3f4a"). InnerVolumeSpecName "kube-api-access-vnv48". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:21:11 crc kubenswrapper[4838]: I1128 11:21:11.328182 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vnv48\" (UniqueName: \"kubernetes.io/projected/573376ae-377c-42ef-96d5-4ff9704f3f4a-kube-api-access-vnv48\") on node \"crc\" DevicePath \"\"" Nov 28 11:21:11 crc kubenswrapper[4838]: I1128 11:21:11.430843 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/573376ae-377c-42ef-96d5-4ff9704f3f4a-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "573376ae-377c-42ef-96d5-4ff9704f3f4a" (UID: "573376ae-377c-42ef-96d5-4ff9704f3f4a"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:21:11 crc kubenswrapper[4838]: I1128 11:21:11.533453 4838 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/573376ae-377c-42ef-96d5-4ff9704f3f4a-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 28 11:21:11 crc kubenswrapper[4838]: I1128 11:21:11.825244 4838 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-r69n8_must-gather-mk428_573376ae-377c-42ef-96d5-4ff9704f3f4a/copy/0.log" Nov 28 11:21:11 crc kubenswrapper[4838]: I1128 11:21:11.828346 4838 scope.go:117] "RemoveContainer" containerID="d0a2af1606bfb75df271044cea2d92f491499773e690921744b0ca73987b6bf6" Nov 28 11:21:11 crc kubenswrapper[4838]: I1128 11:21:11.828566 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-r69n8/must-gather-mk428" Nov 28 11:21:11 crc kubenswrapper[4838]: I1128 11:21:11.863962 4838 scope.go:117] "RemoveContainer" containerID="e4197b334988c6f15111214ffe1b9f981e1f23d2115db2b874a5a6d9059ca857" Nov 28 11:21:12 crc kubenswrapper[4838]: I1128 11:21:12.585375 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="573376ae-377c-42ef-96d5-4ff9704f3f4a" path="/var/lib/kubelet/pods/573376ae-377c-42ef-96d5-4ff9704f3f4a/volumes" Nov 28 11:21:23 crc kubenswrapper[4838]: I1128 11:21:23.940279 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:21:23 crc kubenswrapper[4838]: I1128 11:21:23.940767 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:21:31 crc kubenswrapper[4838]: I1128 11:21:31.078344 4838 scope.go:117] "RemoveContainer" containerID="9394e44c4c8165997bec0a501a23bcf9e9f36962519e1de8ef6db8730c24a00b" Nov 28 11:21:53 crc kubenswrapper[4838]: I1128 11:21:53.940154 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:21:53 crc kubenswrapper[4838]: I1128 11:21:53.940891 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:22:01 crc kubenswrapper[4838]: I1128 11:22:01.090837 4838 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tc9bv"] Nov 28 11:22:01 crc kubenswrapper[4838]: E1128 11:22:01.092474 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="573376ae-377c-42ef-96d5-4ff9704f3f4a" containerName="gather" Nov 28 11:22:01 crc kubenswrapper[4838]: I1128 11:22:01.092499 4838 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="573376ae-377c-42ef-96d5-4ff9704f3f4a" containerName="gather" Nov 28 11:22:01 crc kubenswrapper[4838]: E1128 11:22:01.092546 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6907aaf9-441d-4253-a302-1939b127083b" containerName="extract-content" Nov 28 11:22:01 crc kubenswrapper[4838]: I1128 11:22:01.092564 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="6907aaf9-441d-4253-a302-1939b127083b" containerName="extract-content" Nov 28 11:22:01 crc kubenswrapper[4838]: E1128 11:22:01.092597 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="573376ae-377c-42ef-96d5-4ff9704f3f4a" containerName="copy" Nov 28 11:22:01 crc kubenswrapper[4838]: I1128 11:22:01.092609 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="573376ae-377c-42ef-96d5-4ff9704f3f4a" containerName="copy" Nov 28 11:22:01 crc kubenswrapper[4838]: E1128 11:22:01.092667 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6907aaf9-441d-4253-a302-1939b127083b" containerName="extract-utilities" Nov 28 11:22:01 crc kubenswrapper[4838]: I1128 11:22:01.092677 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="6907aaf9-441d-4253-a302-1939b127083b" containerName="extract-utilities" Nov 28 11:22:01 crc kubenswrapper[4838]: E1128 11:22:01.092701 4838 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6907aaf9-441d-4253-a302-1939b127083b" containerName="registry-server" Nov 28 11:22:01 crc kubenswrapper[4838]: I1128 11:22:01.092709 4838 state_mem.go:107] "Deleted CPUSet assignment" podUID="6907aaf9-441d-4253-a302-1939b127083b" containerName="registry-server" Nov 28 11:22:01 crc kubenswrapper[4838]: I1128 11:22:01.093206 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="573376ae-377c-42ef-96d5-4ff9704f3f4a" containerName="gather" Nov 28 11:22:01 crc kubenswrapper[4838]: I1128 11:22:01.093247 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="6907aaf9-441d-4253-a302-1939b127083b" containerName="registry-server" Nov 28 11:22:01 crc kubenswrapper[4838]: I1128 11:22:01.093279 4838 memory_manager.go:354] "RemoveStaleState removing state" podUID="573376ae-377c-42ef-96d5-4ff9704f3f4a" containerName="copy" Nov 28 11:22:01 crc kubenswrapper[4838]: I1128 11:22:01.103994 4838 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tc9bv" Nov 28 11:22:01 crc kubenswrapper[4838]: I1128 11:22:01.116340 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tc9bv"] Nov 28 11:22:01 crc kubenswrapper[4838]: I1128 11:22:01.266504 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dt9jf\" (UniqueName: \"kubernetes.io/projected/49460217-bdf2-45ce-a681-34d62ea80b03-kube-api-access-dt9jf\") pod \"certified-operators-tc9bv\" (UID: \"49460217-bdf2-45ce-a681-34d62ea80b03\") " pod="openshift-marketplace/certified-operators-tc9bv" Nov 28 11:22:01 crc kubenswrapper[4838]: I1128 11:22:01.266570 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49460217-bdf2-45ce-a681-34d62ea80b03-utilities\") pod \"certified-operators-tc9bv\" (UID: \"49460217-bdf2-45ce-a681-34d62ea80b03\") " pod="openshift-marketplace/certified-operators-tc9bv" Nov 28 11:22:01 crc kubenswrapper[4838]: I1128 11:22:01.266627 4838 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49460217-bdf2-45ce-a681-34d62ea80b03-catalog-content\") pod \"certified-operators-tc9bv\" (UID: \"49460217-bdf2-45ce-a681-34d62ea80b03\") " pod="openshift-marketplace/certified-operators-tc9bv" Nov 28 11:22:01 crc kubenswrapper[4838]: I1128 11:22:01.369962 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dt9jf\" (UniqueName: \"kubernetes.io/projected/49460217-bdf2-45ce-a681-34d62ea80b03-kube-api-access-dt9jf\") pod \"certified-operators-tc9bv\" (UID: \"49460217-bdf2-45ce-a681-34d62ea80b03\") " pod="openshift-marketplace/certified-operators-tc9bv" Nov 28 11:22:01 crc kubenswrapper[4838]: I1128 11:22:01.370010 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49460217-bdf2-45ce-a681-34d62ea80b03-utilities\") pod \"certified-operators-tc9bv\" (UID: \"49460217-bdf2-45ce-a681-34d62ea80b03\") " pod="openshift-marketplace/certified-operators-tc9bv" Nov 28 11:22:01 crc kubenswrapper[4838]: I1128 11:22:01.370052 4838 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49460217-bdf2-45ce-a681-34d62ea80b03-catalog-content\") pod \"certified-operators-tc9bv\" (UID: \"49460217-bdf2-45ce-a681-34d62ea80b03\") " pod="openshift-marketplace/certified-operators-tc9bv" Nov 28 11:22:01 crc kubenswrapper[4838]: I1128 11:22:01.370439 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49460217-bdf2-45ce-a681-34d62ea80b03-utilities\") pod \"certified-operators-tc9bv\" (UID: \"49460217-bdf2-45ce-a681-34d62ea80b03\") " pod="openshift-marketplace/certified-operators-tc9bv" Nov 28 11:22:01 crc kubenswrapper[4838]: I1128 11:22:01.370476 4838 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49460217-bdf2-45ce-a681-34d62ea80b03-catalog-content\") pod \"certified-operators-tc9bv\" (UID: \"49460217-bdf2-45ce-a681-34d62ea80b03\") " pod="openshift-marketplace/certified-operators-tc9bv" Nov 28 11:22:01 crc kubenswrapper[4838]: I1128 11:22:01.404568 4838 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-dt9jf\" (UniqueName: \"kubernetes.io/projected/49460217-bdf2-45ce-a681-34d62ea80b03-kube-api-access-dt9jf\") pod \"certified-operators-tc9bv\" (UID: \"49460217-bdf2-45ce-a681-34d62ea80b03\") " pod="openshift-marketplace/certified-operators-tc9bv" Nov 28 11:22:01 crc kubenswrapper[4838]: I1128 11:22:01.439073 4838 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tc9bv" Nov 28 11:22:01 crc kubenswrapper[4838]: I1128 11:22:01.911436 4838 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tc9bv"] Nov 28 11:22:02 crc kubenswrapper[4838]: I1128 11:22:02.431007 4838 generic.go:334] "Generic (PLEG): container finished" podID="49460217-bdf2-45ce-a681-34d62ea80b03" containerID="00e08f8b3c211c9ed6262a904c9566a7e92d14d2b8e916373a8159f3450c0410" exitCode=0 Nov 28 11:22:02 crc kubenswrapper[4838]: I1128 11:22:02.431086 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tc9bv" event={"ID":"49460217-bdf2-45ce-a681-34d62ea80b03","Type":"ContainerDied","Data":"00e08f8b3c211c9ed6262a904c9566a7e92d14d2b8e916373a8159f3450c0410"} Nov 28 11:22:02 crc kubenswrapper[4838]: I1128 11:22:02.431437 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tc9bv" event={"ID":"49460217-bdf2-45ce-a681-34d62ea80b03","Type":"ContainerStarted","Data":"9bc73440202b89aff17ea2f8fe4703da52d4a0b5eb156135b7895d4eaa92ccf5"} Nov 28 11:22:02 crc kubenswrapper[4838]: I1128 11:22:02.439452 4838 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 11:22:03 crc kubenswrapper[4838]: I1128 11:22:03.443576 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tc9bv" event={"ID":"49460217-bdf2-45ce-a681-34d62ea80b03","Type":"ContainerStarted","Data":"d039b2a273df3cd6ce074135aaebd8f127146ed786a68d04095d96a9bd575238"} Nov 28 11:22:04 crc kubenswrapper[4838]: I1128 11:22:04.463209 4838 generic.go:334] "Generic (PLEG): container finished" podID="49460217-bdf2-45ce-a681-34d62ea80b03" containerID="d039b2a273df3cd6ce074135aaebd8f127146ed786a68d04095d96a9bd575238" exitCode=0 Nov 28 11:22:04 crc kubenswrapper[4838]: I1128 11:22:04.463640 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tc9bv" event={"ID":"49460217-bdf2-45ce-a681-34d62ea80b03","Type":"ContainerDied","Data":"d039b2a273df3cd6ce074135aaebd8f127146ed786a68d04095d96a9bd575238"} Nov 28 11:22:05 crc kubenswrapper[4838]: I1128 11:22:05.475957 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tc9bv" event={"ID":"49460217-bdf2-45ce-a681-34d62ea80b03","Type":"ContainerStarted","Data":"aa6ce6319083f2677f42ce7f49da8f9202817ba36c97962b472e6cfbf4dabcf7"} Nov 28 11:22:05 crc kubenswrapper[4838]: I1128 11:22:05.516276 4838 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tc9bv" podStartSLOduration=1.920142638 podStartE2EDuration="4.516255692s" podCreationTimestamp="2025-11-28 11:22:01 +0000 UTC" firstStartedPulling="2025-11-28 11:22:02.439075231 +0000 UTC m=+5094.138049431" lastFinishedPulling="2025-11-28 11:22:05.035188295 +0000 UTC m=+5096.734162485" observedRunningTime="2025-11-28 11:22:05.504286556 +0000 UTC m=+5097.203260746" watchObservedRunningTime="2025-11-28 
11:22:05.516255692 +0000 UTC m=+5097.215229872" Nov 28 11:22:11 crc kubenswrapper[4838]: I1128 11:22:11.441112 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tc9bv" Nov 28 11:22:11 crc kubenswrapper[4838]: I1128 11:22:11.441741 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tc9bv" Nov 28 11:22:11 crc kubenswrapper[4838]: I1128 11:22:11.518133 4838 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tc9bv" Nov 28 11:22:11 crc kubenswrapper[4838]: I1128 11:22:11.646260 4838 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tc9bv" Nov 28 11:22:11 crc kubenswrapper[4838]: I1128 11:22:11.777383 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tc9bv"] Nov 28 11:22:13 crc kubenswrapper[4838]: I1128 11:22:13.573028 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tc9bv" podUID="49460217-bdf2-45ce-a681-34d62ea80b03" containerName="registry-server" containerID="cri-o://aa6ce6319083f2677f42ce7f49da8f9202817ba36c97962b472e6cfbf4dabcf7" gracePeriod=2 Nov 28 11:22:14 crc kubenswrapper[4838]: I1128 11:22:14.593953 4838 generic.go:334] "Generic (PLEG): container finished" podID="49460217-bdf2-45ce-a681-34d62ea80b03" containerID="aa6ce6319083f2677f42ce7f49da8f9202817ba36c97962b472e6cfbf4dabcf7" exitCode=0 Nov 28 11:22:14 crc kubenswrapper[4838]: I1128 11:22:14.594063 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tc9bv" event={"ID":"49460217-bdf2-45ce-a681-34d62ea80b03","Type":"ContainerDied","Data":"aa6ce6319083f2677f42ce7f49da8f9202817ba36c97962b472e6cfbf4dabcf7"} Nov 28 11:22:15 crc kubenswrapper[4838]: I1128 11:22:15.221890 4838 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tc9bv" Nov 28 11:22:15 crc kubenswrapper[4838]: I1128 11:22:15.394547 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49460217-bdf2-45ce-a681-34d62ea80b03-catalog-content\") pod \"49460217-bdf2-45ce-a681-34d62ea80b03\" (UID: \"49460217-bdf2-45ce-a681-34d62ea80b03\") " Nov 28 11:22:15 crc kubenswrapper[4838]: I1128 11:22:15.395313 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dt9jf\" (UniqueName: \"kubernetes.io/projected/49460217-bdf2-45ce-a681-34d62ea80b03-kube-api-access-dt9jf\") pod \"49460217-bdf2-45ce-a681-34d62ea80b03\" (UID: \"49460217-bdf2-45ce-a681-34d62ea80b03\") " Nov 28 11:22:15 crc kubenswrapper[4838]: I1128 11:22:15.395405 4838 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49460217-bdf2-45ce-a681-34d62ea80b03-utilities\") pod \"49460217-bdf2-45ce-a681-34d62ea80b03\" (UID: \"49460217-bdf2-45ce-a681-34d62ea80b03\") " Nov 28 11:22:15 crc kubenswrapper[4838]: I1128 11:22:15.396058 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49460217-bdf2-45ce-a681-34d62ea80b03-utilities" (OuterVolumeSpecName: "utilities") pod "49460217-bdf2-45ce-a681-34d62ea80b03" (UID: "49460217-bdf2-45ce-a681-34d62ea80b03"). 
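[Editor's note] The pod_startup_latency_tracker entry above reports both an end-to-end startup duration and an SLO duration that excludes the image-pull window; its figures are consistent with podStartSLOduration = podStartE2EDuration - (lastFinishedPulling - firstStartedPulling). A small sketch checking that arithmetic from the monotonic m=+ offsets (the constants are copied from the entry; the formula is inferred from them, not quoted from kubelet source):

```go
// Check that the SLO startup duration equals the end-to-end duration
// minus the image-pull window, using the m=+... offsets from the log.
package main

import "fmt"

func main() {
	const (
		firstStartedPulling = 5094.138049431 // m=+ offset, seconds
		lastFinishedPulling = 5096.734162485
		podStartE2E         = 4.516255692 // podCreationTimestamp -> observedRunningTime
	)
	pullTime := lastFinishedPulling - firstStartedPulling
	slo := podStartE2E - pullTime
	fmt.Printf("image pull: %.9fs, podStartSLOduration: %.9fs\n", pullTime, slo)
	// Prints podStartSLOduration: 1.920142638s, matching the log entry.
}
```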
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:22:15 crc kubenswrapper[4838]: I1128 11:22:15.396338 4838 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49460217-bdf2-45ce-a681-34d62ea80b03-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 11:22:15 crc kubenswrapper[4838]: I1128 11:22:15.402868 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49460217-bdf2-45ce-a681-34d62ea80b03-kube-api-access-dt9jf" (OuterVolumeSpecName: "kube-api-access-dt9jf") pod "49460217-bdf2-45ce-a681-34d62ea80b03" (UID: "49460217-bdf2-45ce-a681-34d62ea80b03"). InnerVolumeSpecName "kube-api-access-dt9jf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 11:22:15 crc kubenswrapper[4838]: I1128 11:22:15.443159 4838 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49460217-bdf2-45ce-a681-34d62ea80b03-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "49460217-bdf2-45ce-a681-34d62ea80b03" (UID: "49460217-bdf2-45ce-a681-34d62ea80b03"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 11:22:15 crc kubenswrapper[4838]: I1128 11:22:15.498886 4838 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dt9jf\" (UniqueName: \"kubernetes.io/projected/49460217-bdf2-45ce-a681-34d62ea80b03-kube-api-access-dt9jf\") on node \"crc\" DevicePath \"\"" Nov 28 11:22:15 crc kubenswrapper[4838]: I1128 11:22:15.498932 4838 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49460217-bdf2-45ce-a681-34d62ea80b03-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 11:22:15 crc kubenswrapper[4838]: I1128 11:22:15.611935 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tc9bv" event={"ID":"49460217-bdf2-45ce-a681-34d62ea80b03","Type":"ContainerDied","Data":"9bc73440202b89aff17ea2f8fe4703da52d4a0b5eb156135b7895d4eaa92ccf5"} Nov 28 11:22:15 crc kubenswrapper[4838]: I1128 11:22:15.612004 4838 scope.go:117] "RemoveContainer" containerID="aa6ce6319083f2677f42ce7f49da8f9202817ba36c97962b472e6cfbf4dabcf7" Nov 28 11:22:15 crc kubenswrapper[4838]: I1128 11:22:15.612047 4838 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tc9bv" Nov 28 11:22:15 crc kubenswrapper[4838]: I1128 11:22:15.659049 4838 scope.go:117] "RemoveContainer" containerID="d039b2a273df3cd6ce074135aaebd8f127146ed786a68d04095d96a9bd575238" Nov 28 11:22:15 crc kubenswrapper[4838]: I1128 11:22:15.683937 4838 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tc9bv"] Nov 28 11:22:15 crc kubenswrapper[4838]: I1128 11:22:15.693081 4838 scope.go:117] "RemoveContainer" containerID="00e08f8b3c211c9ed6262a904c9566a7e92d14d2b8e916373a8159f3450c0410" Nov 28 11:22:15 crc kubenswrapper[4838]: I1128 11:22:15.702369 4838 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tc9bv"] Nov 28 11:22:16 crc kubenswrapper[4838]: I1128 11:22:16.582911 4838 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49460217-bdf2-45ce-a681-34d62ea80b03" path="/var/lib/kubelet/pods/49460217-bdf2-45ce-a681-34d62ea80b03/volumes" Nov 28 11:22:23 crc kubenswrapper[4838]: I1128 11:22:23.939996 4838 patch_prober.go:28] interesting pod/machine-config-daemon-5dxdd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 11:22:23 crc kubenswrapper[4838]: I1128 11:22:23.940417 4838 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 11:22:23 crc kubenswrapper[4838]: I1128 11:22:23.940496 4838 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" Nov 28 11:22:23 crc kubenswrapper[4838]: I1128 11:22:23.941708 4838 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"de4ed4da0dc32a1d3a9d92a015a3f975cc9489bdd3b1bb3bb95fc88a257a1ecf"} pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 11:22:23 crc kubenswrapper[4838]: I1128 11:22:23.941844 4838 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" podUID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerName="machine-config-daemon" containerID="cri-o://de4ed4da0dc32a1d3a9d92a015a3f975cc9489bdd3b1bb3bb95fc88a257a1ecf" gracePeriod=600 Nov 28 11:22:24 crc kubenswrapper[4838]: I1128 11:22:24.717000 4838 generic.go:334] "Generic (PLEG): container finished" podID="5c3daa53-8c4e-4e30-aeba-146602dd45cd" containerID="de4ed4da0dc32a1d3a9d92a015a3f975cc9489bdd3b1bb3bb95fc88a257a1ecf" exitCode=0 Nov 28 11:22:24 crc kubenswrapper[4838]: I1128 11:22:24.717586 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerDied","Data":"de4ed4da0dc32a1d3a9d92a015a3f975cc9489bdd3b1bb3bb95fc88a257a1ecf"} Nov 28 11:22:24 crc kubenswrapper[4838]: I1128 11:22:24.717839 4838 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
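[Editor's note] The entries above show a liveness probe failing with "connection refused" against http://127.0.0.1:8798/health, after which the kubelet kills the container (gracePeriod=600) so it can be restarted. A minimal sketch of such an HTTP health check follows, assuming a 1-second timeout and treating 2xx/3xx statuses as success; the real prober's failure thresholds and retry accounting are omitted.

```go
// Minimal HTTP liveness-style check: GET the health endpoint and treat a
// dial error or a non-2xx/3xx status as a probe failure.
package main

import (
	"fmt"
	"net/http"
	"time"
)

func probe(url string) error {
	client := &http.Client{Timeout: 1 * time.Second} // assumed timeout
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. "connect: connection refused", as in the log
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	// The URL is the one from the log; nothing listens there by default,
	// so this prints a failure, which is what triggers the restart above.
	if err := probe("http://127.0.0.1:8798/health"); err != nil {
		fmt.Println("Probe failed:", err)
	}
}
```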
pod="openshift-machine-config-operator/machine-config-daemon-5dxdd" event={"ID":"5c3daa53-8c4e-4e30-aeba-146602dd45cd","Type":"ContainerStarted","Data":"4181ac99489057a79c4e0d8224d66c7de775cf66f199a08db6a420e3375ee1a7"} Nov 28 11:22:24 crc kubenswrapper[4838]: I1128 11:22:24.717868 4838 scope.go:117] "RemoveContainer" containerID="b8237eaaf27ec7883da43a41ebf9c501dd653ec36cf7eded33b05d3eea15582e" Nov 28 11:22:31 crc kubenswrapper[4838]: I1128 11:22:31.141918 4838 scope.go:117] "RemoveContainer" containerID="59372b49860ff5ffac5405903dfe76020550208b00feb28f42dff4b599ae2c19" Nov 28 11:22:31 crc kubenswrapper[4838]: I1128 11:22:31.187263 4838 scope.go:117] "RemoveContainer" containerID="4320f5f2e08affb1f3b5bcfbbafae328b76208d5ce6df40d2b2aaff1fa855410" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515112303164024441 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015112303164017356 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015112270432016502 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015112270432015452 5ustar corecore